Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (102 commits)
  crypto: sha-s390 - Fix warnings in import function
  crypto: vmac - New hash algorithm for intel_txt support
  crypto: api - Do not displace newly registered algorithms
  crypto: ansi_cprng - Fix module initialization
  crypto: xcbc - Fix alignment calculation of xcbc_tfm_ctx
  crypto: fips - Depend on ansi_cprng
  crypto: blkcipher - Do not use eseqiv on stream ciphers
  crypto: ctr - Use chainiv on raw counter mode
  Revert crypto: fips - Select CPRNG
  crypto: rng - Fix typo
  crypto: talitos - add support for 36 bit addressing
  crypto: talitos - align locks on cache lines
  crypto: talitos - simplify hmac data size calculation
  crypto: mv_cesa - Add support for Orion5X crypto engine
  crypto: cryptd - Add support to access underlaying shash
  crypto: gcm - Use GHASH digest algorithm
  crypto: ghash - Add GHASH digest algorithm for GCM
  crypto: authenc - Convert to ahash
  crypto: api - Fix aligned ctx helper
  crypto: hmac - Prehash ipad/opad
  ...
Merged by Linus Torvalds on 2009-09-11 09:38:37 -07:00
commit 332a339218
53 changed files with 4521 additions and 1518 deletions
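Several entries in the log above (the authenc conversion, the new GHASH digest, the cryptd shash access) revolve around the asynchronous hash (ahash) interface whose frontend is rewritten in crypto/ahash.c further down. As a rough orientation for reading those hunks, here is a minimal sketch of how a kernel-side caller drives that interface; this is an illustrative assumption, not code from this merge, and the algorithm name "sha1" and the helper name example_ahash_digest() are made up for the example.

/* Hedged sketch: one-shot digest through the ahash API on a post-merge kernel.
 * "sha1" and example_ahash_digest() are illustrative only. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ahash_digest(u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);	/* any ahash or shash-backed algorithm */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);	/* async drivers may return -EINPROGRESS */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

With a NULL callback as here, an asynchronous implementation would leave the caller to deal with -EINPROGRESS itself; real users normally pass a completion callback, which is exactly what the reworked authenc code below does.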


@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
	const u8 *temp_key = key;
	u32 *flags = &tfm->crt_flags;

-	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
+	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}
	for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {

@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
	if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
	      memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-		     DES_KEY_SIZE))) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		     DES_KEY_SIZE)) &&
+	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}
	for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {


@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc)
	return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha1_state *octx = out;
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha1_state *ictx = in;
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = KIMD_SHA_1;
return 0;
}
static struct shash_alg alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_init,
	.update = s390_sha_update,
	.final = s390_sha_final,
+	.export = sha1_export,
+	.import = sha1_import,
	.descsize = sizeof(struct s390_sha_ctx),
+	.statesize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-s390",


@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc)
	return 0;
}
static int sha256_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha256_state *octx = out;
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha256_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha256_state *ictx = in;
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_256;
return 0;
}
static struct shash_alg alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init = sha256_init,
	.update = s390_sha_update,
	.final = s390_sha_final,
+	.export = sha256_export,
+	.import = sha256_import,
	.descsize = sizeof(struct s390_sha_ctx),
+	.statesize = sizeof(struct sha256_state),
	.base = {
		.cra_name = "sha256",
		.cra_driver_name = "sha256-s390",


@@ -13,7 +13,10 @@
 *
 */
#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>

#include "sha.h"

@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc)
	return 0;
}
static int sha512_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha512_state *octx = out;
octx->count[0] = sctx->count;
octx->count[1] = 0;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha512_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha512_state *ictx = in;
if (unlikely(ictx->count[1]))
return -ERANGE;
sctx->count = ictx->count[0];
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = KIMD_SHA_512;
return 0;
}
static struct shash_alg sha512_alg = {
	.digestsize = SHA512_DIGEST_SIZE,
	.init = sha512_init,
	.update = s390_sha_update,
	.final = s390_sha_final,
+	.export = sha512_export,
+	.import = sha512_import,
	.descsize = sizeof(struct s390_sha_ctx),
+	.statesize = sizeof(struct sha512_state),
	.base = {
		.cra_name = "sha512",
		.cra_driver_name = "sha512-s390",

@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = {
	.init = sha384_init,
	.update = s390_sha_update,
	.final = s390_sha_final,
+	.export = sha512_export,
+	.import = sha512_import,
	.descsize = sizeof(struct s390_sha_ctx),
+	.statesize = sizeof(struct sha512_state),
	.base = {
		.cra_name = "sha384",
		.cra_driver_name = "sha384-s390",


@@ -636,7 +636,7 @@ static int __init aesni_init(void)
	int err;

	if (!cpu_has_aes) {
-		printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
+		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}
	if ((err = crypto_register_alg(&aesni_alg)))


@@ -23,11 +23,13 @@ comment "Crypto core or helper"

config CRYPTO_FIPS
	bool "FIPS 200 compliance"
+	depends on CRYPTO_ANSI_CPRNG
	help
	  This options enables the fips boot option which is
	  required if you want to system to operate in a FIPS 200
	  certification. You should say no unless you know what
-	  this is.
+	  this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+	  option is selected

config CRYPTO_ALGAPI
	tristate

@@ -156,7 +158,7 @@ config CRYPTO_GCM
	tristate "GCM/GMAC support"
	select CRYPTO_CTR
	select CRYPTO_AEAD
-	select CRYPTO_GF128MUL
+	select CRYPTO_GHASH
	help
	  Support for Galois/Counter Mode (GCM) and Galois Message
	  Authentication Code (GMAC). Required for IPSec.

@@ -267,6 +269,18 @@ config CRYPTO_XCBC
	  http://csrc.nist.gov/encryption/modes/proposedmodes/
	  xcbc-mac/xcbc-mac-spec.pdf
config CRYPTO_VMAC
tristate "VMAC support"
depends on EXPERIMENTAL
select CRYPTO_HASH
select CRYPTO_MANAGER
help
VMAC is a message authentication algorithm designed for
very high speed on 64-bit architectures.
See also:
<http://fastcrypto.org/vmac>
comment "Digest" comment "Digest"
config CRYPTO_CRC32C config CRYPTO_CRC32C
@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation. gain performance compared with software implementation.
Module will be crc32c-intel. Module will be crc32c-intel.
config CRYPTO_GHASH
tristate "GHASH digest algorithm"
select CRYPTO_SHASH
select CRYPTO_GF128MUL
help
GHASH is message digest algorithm for GCM (Galois/Counter Mode).
config CRYPTO_MD4
	tristate "MD4 digest algorithm"
	select CRYPTO_HASH

@@ -780,13 +801,14 @@ comment "Random Number Generation"

config CRYPTO_ANSI_CPRNG
	tristate "Pseudo Random Number Generation for Cryptographic modules"
+	default m
	select CRYPTO_AES
	select CRYPTO_RNG
-	select CRYPTO_FIPS
	help
	  This option enables the generic pseudo random number generator
	  for cryptographic modules. Uses the Algorithm specified in
-	  ANSI X9.31 A.2.4
+	  ANSI X9.31 A.2.4. Not this option must be enabled if CRYPTO_FIPS
+	  is selected

source "drivers/crypto/Kconfig"


@@ -3,7 +3,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o

-crypto-objs := api.o cipher.o digest.o compress.o
+crypto-objs := api.o cipher.o compress.o

obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o

@@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o

-crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
@@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o

@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o

#
# generic algorithms and the async_tx api


@@ -14,6 +14,7 @@
 */

#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>

@@ -25,6 +26,8 @@
#include "internal.h"

+static const char *skcipher_default_geniv __read_mostly;
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
@@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);

const char *crypto_default_geniv(const struct crypto_alg *alg)
{
-	return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+					 alg->cra_ablkcipher.ivsize) !=
+	    alg->cra_blocksize)
+		return "chainiv";
+
+	return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+	       "eseqiv" : skcipher_default_geniv;
}

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
+				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
-				      CRYPTO_ALG_TYPE_MASK);
+				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

@@ -360,3 +371,17 @@ err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
static int __init skcipher_module_init(void)
{
skcipher_default_geniv = num_possible_cpus() > 1 ?
"eseqiv" : "chainiv";
return 0;
}
static void skcipher_module_exit(void)
{
}
module_init(skcipher_module_init);
module_exit(skcipher_module_exit);
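The crypto_default_geniv() rework above corresponds to the "Do not use eseqiv on stream ciphers" and "Use chainiv on raw counter mode" entries in the commit list: when an algorithm's IV size differs from its block size (stream ciphers, raw CTR), the default IV generator is now chainiv; otherwise eseqiv is used for async algorithms, and for synchronous ones the default is chosen once at module init, eseqiv on SMP (num_possible_cpus() > 1) and chainiv on uniprocessor builds.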


@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
	ctx->key_enc[6 * i + 11] = t; \
} while (0)

-#define loop8(i) do { \
+#define loop8tophalf(i) do { \
	t = ror32(t, 8); \
	t = ls_box(t) ^ rco_tab[i]; \
	t ^= ctx->key_enc[8 * i]; \
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
	ctx->key_enc[8 * i + 10] = t; \
	t ^= ctx->key_enc[8 * i + 3]; \
	ctx->key_enc[8 * i + 11] = t; \
+} while (0)
+
+#define loop8(i) do { \
+	loop8tophalf(i); \
	t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
	ctx->key_enc[8 * i + 12] = t; \
	t ^= ctx->key_enc[8 * i + 5]; \

@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
		ctx->key_enc[5] = le32_to_cpu(key[5]);
		ctx->key_enc[6] = le32_to_cpu(key[6]);
		t = ctx->key_enc[7] = le32_to_cpu(key[7]);
-		for (i = 0; i < 7; ++i)
+		for (i = 0; i < 6; ++i)
			loop8(i);
+		loop8tophalf(i);
		break;
	}
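The aes_generic change is easiest to follow from the key-schedule arithmetic (my summary, not text from the diff): an AES-256 expanded key needs 4 x (14 + 1) = 60 32-bit words, of which the first 8 come straight from the key, so 52 more must be generated. Each loop8() pass produces 8 words, so the old loop of 7 full passes produced 8 + 7 x 8 = 64 words, 4 past the end of the 60-word key_enc[] schedule; 6 full passes plus the new loop8tophalf() gives 8 + 6 x 8 + 4 = 60 and stops exactly at the end.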


@@ -24,6 +24,19 @@

#include "internal.h"
struct ahash_request_priv {
crypto_completion_t complete;
void *data;
u8 *result;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
halg);
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
@@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_ATOMIC);
+	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
-	ret = ahash->setkey(tfm, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	ret = tfm->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
	return ret;
}

-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
-	struct ahash_alg *ahash = crypto_ahash_alg(tfm);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

-	return ahash->setkey(tfm, key, keylen);
+	return tfm->setkey(tfm, key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
@@ -169,42 +180,219 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
	return -ENOSYS;
}

-int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+						   unsigned long mask)
+{
+	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
if (err == -EINPROGRESS)
return;
if (!err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
kzfree(priv);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
ahash_op_unaligned_finish(areq, err);
complete(data, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_alg *alg = crypto_ahash_alg(tfm);
+	unsigned long alignmask = crypto_ahash_alignmask(tfm);
+	unsigned int ds = crypto_ahash_digestsize(tfm);
+	struct ahash_request_priv *priv;
+	int err;

-	memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC);
+	if (!priv)
+		return -ENOMEM;

-	if (alg->reinit)
-		alg->reinit(req);
+	priv->result = req->result;
+	priv->complete = req->base.complete;
+	priv->data = req->base.data;
+
+	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+	req->base.complete = ahash_op_unaligned_done;
+	req->base.data = req;
+	req->priv = priv;
err = op(req);
ahash_op_unaligned_finish(req, err);
return err;
}
static int crypto_ahash_op(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)req->result & alignmask)
return ahash_op_unaligned(req, op);
return op(req);
}
int crypto_ahash_final(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
if (err == -EINPROGRESS)
return;
if (!err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
kzfree(priv);
}
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
ahash_def_finup_finish2(areq, err);
complete(data, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
if (err)
goto out;
req->base.complete = ahash_def_finup_done2;
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_ahash_reqtfm(req)->final(req);
out:
ahash_def_finup_finish2(req, err);
return err;
}
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
struct ahash_request_priv *priv = areq->priv;
crypto_completion_t complete = priv->complete;
void *data = priv->data;
err = ahash_def_finup_finish1(areq, err);
complete(data, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request_priv *priv;
priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC);
if (!priv)
return -ENOMEM;
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
req->base.complete = ahash_def_finup_done1;
req->base.data = req;
req->priv = priv;
return ahash_def_finup_finish1(req, tfm->update(req));
}
static int ahash_no_export(struct ahash_request *req, void *out)
{
return -ENOSYS;
}
static int ahash_no_import(struct ahash_request *req, const void *in)
{
return -ENOSYS;
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
hash->init = alg->init;
hash->update = alg->update;
hash->final = alg->final;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
if (alg->setkey)
hash->setkey = alg->setkey;
if (alg->export)
hash->export = alg->export;
if (alg->import)
hash->import = alg->import;
	return 0;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_import);

-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
-					  u32 mask)
-{
-	return alg->cra_ctxsize;
-}
-
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
-	struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
-	struct ahash_tfm *crt = &tfm->crt_ahash;
-
-	if (alg->digestsize > PAGE_SIZE / 8)
-		return -EINVAL;
-
-	crt->init = alg->init;
-	crt->update = alg->update;
-	crt->final = alg->final;
-	crt->digest = alg->digest;
-	crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
-	crt->digestsize = alg->digestsize;
-
-	return 0;
-}
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_ahash_type)
+		return alg->cra_ctxsize;
+
+	return sizeof(struct crypto_shash *);
+}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
				      "yes" : "no");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
-	seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
+	seq_printf(m, "digestsize : %u\n",
+		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
-	.ctxsize = crypto_ahash_ctxsize,
-	.init = crypto_init_ahash_ops,
+	.extsize = crypto_ahash_extsize,
+	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+	.type = CRYPTO_ALG_TYPE_AHASH,
+	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
if (alg->halg.digestsize > PAGE_SIZE / 8 ||
alg->halg.statesize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_ahash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
}
int crypto_register_ahash(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
err = ahash_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
int crypto_unregister_ahash(struct ahash_alg *alg)
{
return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst)
{
int err;
err = ahash_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
void ahash_free_instance(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst)
{
return crypto_init_spawn2(&spawn->base, &alg->base, inst,
&crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
struct crypto_alg *alg;
alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
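A reading aid for the new ahash.c code above (my paraphrase of what the added functions do): when the caller's result buffer violates the algorithm's alignment mask, ahash_op_unaligned() and ahash_def_finup() allocate an ahash_request_priv, repoint req->result at an aligned scratch area inside it and substitute their own completion callbacks; when the operation completes, the digest is copied back into the caller's buffer and the private context is freed with kzfree(). The same machinery also supplies a default finup() built from update() followed by final() for drivers that do not provide one.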


@@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
	crypto_tmpl_put(tmpl);
}

-static void crypto_remove_spawn(struct crypto_spawn *spawn,
-				struct list_head *list,
-				struct list_head *secondary_spawns)
+static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
+					    struct list_head *stack,
+					    struct list_head *top,
+					    struct list_head *secondary_spawns)
{
struct crypto_spawn *spawn, *n;
if (list_empty(stack))
return NULL;
spawn = list_first_entry(stack, struct crypto_spawn, list);
n = list_entry(spawn->list.next, struct crypto_spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
&list_entry(n->list.next, struct crypto_spawn,
list)->inst->alg;
list_move(&spawn->list, secondary_spawns);
return &n->list == stack ? top : &n->inst->alg.cra_users;
}
static void crypto_remove_spawn(struct crypto_spawn *spawn,
struct list_head *list)
{
	struct crypto_instance *inst = spawn->inst;
	struct crypto_template *tmpl = inst->tmpl;

-	list_del_init(&spawn->list);
-	spawn->alg = NULL;
-
	if (crypto_is_dead(&inst->alg))
		return;
@@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
	hlist_del(&inst->list);
	inst->alg.cra_destroy = crypto_destroy_instance;

-	list_splice(&inst->alg.cra_users, secondary_spawns);
+	BUG_ON(!list_empty(&inst->alg.cra_users));
}

-static void crypto_remove_spawns(struct list_head *spawns,
-				 struct list_head *list, u32 new_type)
+static void crypto_remove_spawns(struct crypto_alg *alg,
+				 struct list_head *list,
+				 struct crypto_alg *nalg)
{
+	u32 new_type = (nalg ?: alg)->cra_flags;
	struct crypto_spawn *spawn, *n;
	LIST_HEAD(secondary_spawns);
+	struct list_head *spawns;
+	LIST_HEAD(stack);
+	LIST_HEAD(top);

+	spawns = &alg->cra_users;
	list_for_each_entry_safe(spawn, n, spawns, list) {
		if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
			continue;

-		crypto_remove_spawn(spawn, list, &secondary_spawns);
+		list_move(&spawn->list, &top);
	}

-	while (!list_empty(&secondary_spawns)) {
-		list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
-			crypto_remove_spawn(spawn, list, &secondary_spawns);
+	spawns = &top;
+	do {
+		while (!list_empty(spawns)) {
struct crypto_instance *inst;
spawn = list_first_entry(spawns, struct crypto_spawn,
list);
inst = spawn->inst;
BUG_ON(&inst->alg == alg);
list_move(&spawn->list, &stack);
if (&inst->alg == nalg)
break;
spawn->alg = NULL;
spawns = &inst->alg.cra_users;
}
} while ((spawns = crypto_more_spawns(alg, &stack, &top,
&secondary_spawns)));
list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
if (spawn->alg)
list_move(&spawn->list, &spawn->alg->cra_users);
else
crypto_remove_spawn(spawn, list);
	}
}
@@ -258,7 +307,7 @@ found:
		    q->cra_priority > alg->cra_priority)
			continue;

-		crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
+		crypto_remove_spawns(q, &list, alg);
	}

complete:

@@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
	crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
	list_del_init(&alg->cra_list);
-	crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
+	crypto_remove_spawns(alg, list, NULL);

	return 0;
}
@@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend)
{
int err = -EINVAL;
if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
out:
return err;
}
EXPORT_SYMBOL_GPL(crypto_init_spawn2);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
+	if (!spawn->alg)
+		return;
+
	down_write(&crypto_alg_sem);
	list_del(&spawn->list);
	up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);

-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
-				    u32 mask)
+static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
	struct crypto_alg *alg;
	struct crypto_alg *alg2;
-	struct crypto_tfm *tfm;

	down_read(&crypto_alg_sem);
	alg = spawn->alg;
@@ -516,6 +583,19 @@
		return ERR_PTR(-EAGAIN);
	}

+	return alg;
+}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
	tfm = ERR_PTR(-EINVAL);
	if (unlikely((alg->cra_flags ^ type) & mask))
		goto out_put_alg;

@@ -532,6 +612,27 @@ out_put_alg:
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
tfm = crypto_create_tfm(alg, spawn->frontend);
if (IS_ERR(tfm))
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);

-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+				    const struct crypto_type *frontend,
+				    u32 type, u32 mask)
{
	const char *name;
	int err;

@@ -605,9 +708,9 @@
	if (IS_ERR(name))
		return ERR_PTR(err);

-	return crypto_alg_mod_lookup(name, type, mask);
+	return crypto_find_alg(name, frontend, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_attr_alg);
+EXPORT_SYMBOL_GPL(crypto_attr_alg2);

int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
@@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);

-struct crypto_instance *crypto_alloc_instance(const char *name,
-					      struct crypto_alg *alg)
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+			     unsigned int head)
{
	struct crypto_instance *inst;
-	struct crypto_spawn *spawn;
+	char *p;
	int err;

-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
+	p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
+		    GFP_KERNEL);
+	if (!p)
		return ERR_PTR(-ENOMEM);

+	inst = (void *)(p + head);
+
	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +753,25 @@
		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
return p;
err_free_inst:
kfree(p);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg)
{
struct crypto_instance *inst;
struct crypto_spawn *spawn;
int err;
inst = crypto_alloc_instance2(name, alg, 0);
if (IS_ERR(inst))
goto out;
	spawn = crypto_instance_ctx(inst);
	err = crypto_init_spawn(spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);

@@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,

err_free_inst:
	kfree(inst);
-	return ERR_PTR(err);
+	inst = ERR_PTR(err);
+
+out:
+	return inst;
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);


@@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data)
		goto err;

	do {
+		if (tmpl->create) {
+			err = tmpl->create(tmpl, param->tb);
+			continue;
+		}
+
		inst = tmpl->alloc(param->tb);
		if (IS_ERR(inst))
			err = PTR_ERR(inst);


@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx)

/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
{
-	unsigned long flags;
	unsigned char *ptr = buf;
	unsigned int byte_count = (unsigned int)nbytes;
	int err;

@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
	if (nbytes < 0)
		return -EINVAL;

-	spin_lock_irqsave(&ctx->prng_lock, flags);
+	spin_lock_bh(&ctx->prng_lock);

	err = -EINVAL;
	if (ctx->flags & PRNG_NEED_RESET)

@@ -268,7 +267,7 @@ empty_rbuf:
	goto remainder;

done:
-	spin_unlock_irqrestore(&ctx->prng_lock, flags);
+	spin_unlock_bh(&ctx->prng_lock);
	dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
		 err, ctx);
	return err;
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx,
			      unsigned char *V, unsigned char *DT)
{
	int ret;
-	int rc = -EINVAL;
	unsigned char *prng_key;

-	spin_lock(&ctx->prng_lock);
+	spin_lock_bh(&ctx->prng_lock);
	ctx->flags |= PRNG_NEED_RESET;

	prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;

@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx,
	memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
	memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);

-	if (ctx->tfm)
-		crypto_free_cipher(ctx->tfm);
-
-	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(ctx->tfm)) {
-		dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
-			 ctx);
-		ctx->tfm = NULL;
-		goto out;
-	}
-
	ctx->rand_data_valid = DEFAULT_BLK_SZ;

	ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
	if (ret) {
		dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
			 crypto_cipher_get_flags(ctx->tfm));
-		crypto_free_cipher(ctx->tfm);
		goto out;
	}

-	rc = 0;
+	ret = 0;
	ctx->flags &= ~PRNG_NEED_RESET;
out:
-	spin_unlock(&ctx->prng_lock);
-	return rc;
+	spin_unlock_bh(&ctx->prng_lock);
+	return ret;
}
static int cprng_init(struct crypto_tfm *tfm)

@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm)
	struct prng_context *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->prng_lock);
ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->tfm)) {
dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
ctx);
return PTR_ERR(ctx->tfm);
}
	if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
		return -EINVAL;
@@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = {

/* Module initalization */
static int __init prng_mod_init(void)
{
-	int ret = 0;
-
	if (fips_enabled)
		rng_alg.cra_priority += 200;

-	ret = crypto_register_alg(&rng_alg);
-
-	if (ret)
-		goto out;
-out:
-	return 0;
+	return crypto_register_alg(&rng_alg);
}

static void __exit prng_mod_fini(void)


@@ -286,13 +286,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
	case CRYPTO_ALG_TYPE_CIPHER:
		return crypto_init_cipher_ops(tfm);

-	case CRYPTO_ALG_TYPE_DIGEST:
-		if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
-		    CRYPTO_ALG_TYPE_HASH_MASK)
-			return crypto_init_digest_ops_async(tfm);
-		else
-			return crypto_init_digest_ops(tfm);
-
	case CRYPTO_ALG_TYPE_COMPRESS:
		return crypto_init_compress_ops(tfm);

@@ -319,10 +312,6 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
		crypto_exit_cipher_ops(tfm);
		break;

-	case CRYPTO_ALG_TYPE_DIGEST:
-		crypto_exit_digest_ops(tfm);
-		break;
-
	case CRYPTO_ALG_TYPE_COMPRESS:
		crypto_exit_compress_ops(tfm);
		break;

@@ -350,10 +339,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
		len += crypto_cipher_ctxsize(alg);
		break;

-	case CRYPTO_ALG_TYPE_DIGEST:
-		len += crypto_digest_ctxsize(alg);
-		break;
-
	case CRYPTO_ALG_TYPE_COMPRESS:
		len += crypto_compress_ctxsize(alg);
		break;

@@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
	int err = -ENOMEM;

	tfmsize = frontend->tfmsize;
-	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend);
+	total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);

	mem = kzalloc(total, GFP_KERNEL);
	if (mem == NULL)

@@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
	tfm = (struct crypto_tfm *)(mem + tfmsize);
	tfm->__crt_alg = alg;

-	err = frontend->init_tfm(tfm, frontend);
+	err = frontend->init_tfm(tfm);
	if (err)
		goto out_free_tfm;

@@ -503,6 +488,27 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
{
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
crypto_alg_mod_lookup;
if (frontend) {
type &= frontend->maskclear;
mask &= frontend->maskclear;
type |= frontend->type;
mask |= frontend->maskset;
if (frontend->lookup)
lookup = frontend->lookup;
}
return lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);
/*
 * crypto_alloc_tfm - Locate algorithm and allocate transform
 * @alg_name: Name of algorithm

@@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
void *crypto_alloc_tfm(const char *alg_name,
		       const struct crypto_type *frontend, u32 type, u32 mask)
{
-	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
	void *tfm;
	int err;

-	type &= frontend->maskclear;
-	mask &= frontend->maskclear;
-	type |= frontend->type;
-	mask |= frontend->maskset;
-
-	lookup = frontend->lookup ?: crypto_alg_mod_lookup;
-
	for (;;) {
		struct crypto_alg *alg;

-		alg = lookup(alg_name, type, mask);
+		alg = crypto_find_alg(alg_name, frontend, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;


@@ -23,24 +23,36 @@
#include <linux/slab.h>
#include <linux/spinlock.h>

+typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
+
struct authenc_instance_ctx {
-	struct crypto_spawn auth;
+	struct crypto_ahash_spawn auth;
	struct crypto_skcipher_spawn enc;
};

struct crypto_authenc_ctx {
-	spinlock_t auth_lock;
-	struct crypto_hash *auth;
+	unsigned int reqoff;
+	struct crypto_ahash *auth;
	struct crypto_ablkcipher *enc;
};
struct authenc_request_ctx {
unsigned int cryptlen;
struct scatterlist *sg;
struct scatterlist asg[2];
struct scatterlist cipher[2];
crypto_completion_t complete;
crypto_completion_t update_complete;
char tail[];
};
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
				 unsigned int keylen)
{
	unsigned int authkeylen;
	unsigned int enckeylen;
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct crypto_hash *auth = ctx->auth;
+	struct crypto_ahash *auth = ctx->auth;
	struct crypto_ablkcipher *enc = ctx->enc;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;

@@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
	authkeylen = keylen - enckeylen;

-	crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
-	crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) &
+	crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+	crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
			       CRYPTO_TFM_REQ_MASK);
-	err = crypto_hash_setkey(auth, key, authkeylen);
-	crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) &
+	err = crypto_ahash_setkey(auth, key, authkeylen);
+	crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
			       CRYPTO_TFM_RES_MASK);

	if (err)

@@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
	sg_mark_end(head);
}

-static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
-			       struct scatterlist *cipher,
-			       unsigned int cryptlen)
+static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
+					    int err)
+{
struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc), 1);
out:
aead_request_complete(req, err);
}
static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc), 1);
out:
aead_request_complete(req, err);
}
static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc);
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
req->cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
aead_request_complete(req, err);
}
static void authenc_verify_ahash_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc);
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
req->cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
aead_request_complete(req, err);
}
static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct crypto_hash *auth = ctx->auth;
-	struct hash_desc desc = {
-		.tfm = auth,
-		.flags = aead_request_flags(req) & flags,
-	};
-	u8 *hash = aead_request_ctx(req);
+	struct crypto_ahash *auth = ctx->auth;
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	u8 *hash = areq_ctx->tail;
	int err;

-	hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth),
-			   crypto_hash_alignmask(auth) + 1);
+	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+			   crypto_ahash_alignmask(auth) + 1);

-	spin_lock_bh(&ctx->auth_lock);
-	err = crypto_hash_init(&desc);
+	ahash_request_set_tfm(ahreq, auth);
+
+	err = crypto_ahash_init(ahreq);
	if (err)
-		goto auth_unlock;
+		return ERR_PTR(err);

-	err = crypto_hash_update(&desc, req->assoc, req->assoclen);
+	ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
+	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+				   areq_ctx->update_complete, req);
+
+	err = crypto_ahash_update(ahreq);
	if (err)
-		goto auth_unlock;
+		return ERR_PTR(err);

-	err = crypto_hash_update(&desc, cipher, cryptlen);
+	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+				areq_ctx->cryptlen);
+	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+				   areq_ctx->complete, req);
+
+	err = crypto_ahash_finup(ahreq);
	if (err)
-		goto auth_unlock;
+		return ERR_PTR(err);

-	err = crypto_hash_final(&desc, hash);
-auth_unlock:
-	spin_unlock_bh(&ctx->auth_lock);
+	return hash;
+}
static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
u8 *hash = areq_ctx->tail;
int err;
hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->complete, req);
err = crypto_ahash_digest(ahreq);
	if (err)
		return ERR_PTR(err);

@@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
				 unsigned int flags)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *dst = req->dst;
-	struct scatterlist cipher[2];
-	struct page *dstp;
+	struct scatterlist *assoc = req->assoc;
+	struct scatterlist *cipher = areq_ctx->cipher;
+	struct scatterlist *asg = areq_ctx->asg;
	unsigned int ivsize = crypto_aead_ivsize(authenc);
-	unsigned int cryptlen;
+	unsigned int cryptlen = req->cryptlen;
+	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+	struct page *dstp;
	u8 *vdst;
	u8 *hash;
@@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
		sg_set_buf(cipher, iv, ivsize);
		authenc_chain(cipher, dst, vdst == iv + ivsize);
		dst = cipher;
+		cryptlen += ivsize;
	}

-	cryptlen = req->cryptlen + ivsize;
-	hash = crypto_authenc_hash(req, flags, dst, cryptlen);
+	if (sg_is_last(assoc)) {
+		authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
authenc_chain(asg, dst, 0);
dst = asg;
cryptlen += req->assoclen;
}
areq_ctx->cryptlen = cryptlen;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_geniv_ahash_done;
areq_ctx->update_complete = authenc_geniv_ahash_update_done;
hash = authenc_ahash_fn(req, flags);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

@@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
}

static int crypto_authenc_verify(struct aead_request *req,
-				 struct scatterlist *cipher,
-				 unsigned int cryptlen)
+				 authenc_ahash_t authenc_ahash_fn)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	u8 *ohash;
	u8 *ihash;
	unsigned int authsize;

-	ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
-				    cryptlen);
+	areq_ctx->complete = authenc_verify_ahash_done;
+	areq_ctx->update_complete = authenc_verify_ahash_update_done;
+
+	ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
	if (IS_ERR(ohash))
		return PTR_ERR(ohash);

	authsize = crypto_aead_authsize(authenc);
	ihash = ohash + authsize;
-	scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
+	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+				 authsize, 0);

	return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
}
@@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
				  unsigned int cryptlen)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
	struct scatterlist *src = req->src;
-	struct scatterlist cipher[2];
-	struct page *srcp;
+	struct scatterlist *assoc = req->assoc;
+	struct scatterlist *cipher = areq_ctx->cipher;
+	struct scatterlist *asg = areq_ctx->asg;
	unsigned int ivsize = crypto_aead_ivsize(authenc);
+	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+	struct page *srcp;
	u8 *vsrc;

	srcp = sg_page(src);

@@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
		sg_set_buf(cipher, iv, ivsize);
		authenc_chain(cipher, src, vsrc == iv + ivsize);
		src = cipher;
+		cryptlen += ivsize;
	}

-	return crypto_authenc_verify(req, src, cryptlen + ivsize);
+	if (sg_is_last(assoc)) {
authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
authenc_chain(asg, src, 0);
src = asg;
cryptlen += req->assoclen;
}
areq_ctx->cryptlen = cryptlen;
areq_ctx->sg = src;
return crypto_authenc_verify(req, authenc_ahash_fn);
} }
static int crypto_authenc_decrypt(struct aead_request *req) static int crypto_authenc_decrypt(struct aead_request *req)
@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req)
static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_hash *auth; struct crypto_ahash *auth;
struct crypto_ablkcipher *enc; struct crypto_ablkcipher *enc;
int err; int err;
auth = crypto_spawn_hash(&ictx->auth); auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth)) if (IS_ERR(auth))
return PTR_ERR(auth); return PTR_ERR(auth);
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
enc = crypto_spawn_skcipher(&ictx->enc); enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc); err = PTR_ERR(enc);
if (IS_ERR(enc)) if (IS_ERR(enc))
goto err_free_hash; goto err_free_ahash;
ctx->auth = auth; ctx->auth = auth;
ctx->enc = enc; ctx->enc = enc;
tfm->crt_aead.reqsize = max_t(unsigned int, tfm->crt_aead.reqsize = max_t(unsigned int,
(crypto_hash_alignmask(auth) & crypto_ahash_reqsize(auth) + ctx->reqoff +
~(crypto_tfm_ctx_alignment() - 1)) + sizeof(struct authenc_request_ctx) +
crypto_hash_digestsize(auth) * 2, sizeof(struct ahash_request),
sizeof(struct skcipher_givcrypt_request) + sizeof(struct skcipher_givcrypt_request) +
crypto_ablkcipher_reqsize(enc) + crypto_ablkcipher_reqsize(enc) +
crypto_ablkcipher_ivsize(enc)); crypto_ablkcipher_ivsize(enc));
spin_lock_init(&ctx->auth_lock);
return 0; return 0;
err_free_hash: err_free_ahash:
crypto_free_hash(auth); crypto_free_ahash(auth);
return err; return err;
} }
@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_hash(ctx->auth); crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc); crypto_free_ablkcipher(ctx->enc);
} }
@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
{ {
struct crypto_attr_type *algt; struct crypto_attr_type *algt;
struct crypto_instance *inst; struct crypto_instance *inst;
struct crypto_alg *auth; struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct crypto_alg *enc; struct crypto_alg *enc;
struct authenc_instance_ctx *ctx; struct authenc_instance_ctx *ctx;
const char *enc_name; const char *enc_name;
@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK); CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth)) if (IS_ERR(auth))
return ERR_PTR(PTR_ERR(auth)); return ERR_PTR(PTR_ERR(auth));
auth_base = &auth->base;
enc_name = crypto_attr_alg_name(tb[2]); enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name); err = PTR_ERR(enc_name);
if (IS_ERR(enc_name)) if (IS_ERR(enc_name))
@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
ctx = crypto_instance_ctx(inst); ctx = crypto_instance_ctx(inst);
err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err) if (err)
goto err_free_inst; goto err_free_inst;
@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
err = -ENAMETOOLONG; err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth->cra_name, enc->cra_name) >= "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME) CRYPTO_MAX_ALG_NAME)
goto err_drop_enc; goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth->cra_driver_name, "authenc(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc; goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; inst->alg.cra_priority = enc->cra_priority *
10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize; inst->alg.cra_blocksize = enc->cra_blocksize;
inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? inst->alg.cra_aead.maxauthsize = auth->digestsize;
auth->cra_hash.digestsize :
auth->cra_type ?
__crypto_shash_alg(auth)->digestsize :
auth->cra_digest.dia_digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
out: out:
crypto_mod_put(auth); crypto_mod_put(auth_base);
return inst; return inst;
err_drop_enc: err_drop_enc:
crypto_drop_skcipher(&ctx->enc); crypto_drop_skcipher(&ctx->enc);
err_drop_auth: err_drop_auth:
crypto_drop_spawn(&ctx->auth); crypto_drop_ahash(&ctx->auth);
err_free_inst: err_free_inst:
kfree(inst); kfree(inst);
out_put_auth: out_put_auth:
@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc); crypto_drop_skcipher(&ctx->enc);
crypto_drop_spawn(&ctx->auth); crypto_drop_ahash(&ctx->auth);
kfree(inst); kfree(inst);
} }
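The authenc template rebuilt above is normally consumed by name through the AEAD API (IPsec ESP is the main in-kernel user). Below is a minimal, hypothetical sketch of allocating such an instance with standard crypto API calls; the helper name is made up for illustration and error handling is abbreviated.

/* Illustrative sketch, not part of the patch. */
#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_aead *example_alloc_authenc(void)
{
	struct crypto_aead *tfm;

	/* Resolved by crypto_authenc_alloc() above: grabs an ahash spawn
	 * for hmac(sha1) and an skcipher spawn for cbc(aes). */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* Truncated ICV; must not exceed the digest size of the hash. */
	if (crypto_aead_setauthsize(tfm, 12)) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	return tfm;
}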

View File

@ -39,6 +39,11 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue; struct cryptd_queue *queue;
}; };
struct hashd_instance_ctx {
struct crypto_shash_spawn spawn;
struct cryptd_queue *queue;
};
struct cryptd_blkcipher_ctx { struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child; struct crypto_blkcipher *child;
}; };
@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx {
}; };
struct cryptd_hash_ctx { struct cryptd_hash_ctx {
struct crypto_hash *child; struct crypto_shash *child;
}; };
struct cryptd_hash_request_ctx { struct cryptd_hash_request_ctx {
crypto_completion_t complete; crypto_completion_t complete;
struct shash_desc desc;
}; };
static void cryptd_queue_worker(struct work_struct *work); static void cryptd_queue_worker(struct work_struct *work);
@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
crypto_free_blkcipher(ctx->child); crypto_free_blkcipher(ctx->child);
} }
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
struct cryptd_queue *queue) unsigned int tail)
{ {
char *p;
struct crypto_instance *inst; struct crypto_instance *inst;
struct cryptd_instance_ctx *ctx;
int err; int err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
if (!inst) { if (!p)
inst = ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
goto out;
} inst = (void *)(p + head);
err = -ENAMETOOLONG; err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst; goto out_free_inst;
ctx = crypto_instance_ctx(inst);
err = crypto_init_spawn(&ctx->spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (err)
goto out_free_inst;
ctx->queue = queue;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50; inst->alg.cra_priority = alg->cra_priority + 50;
@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_alignmask = alg->cra_alignmask;
out: out:
return inst; return p;
out_free_inst: out_free_inst:
kfree(inst); kfree(p);
inst = ERR_PTR(err); p = ERR_PTR(err);
goto out; goto out;
} }
static struct crypto_instance *cryptd_alloc_blkcipher( static int cryptd_create_blkcipher(struct crypto_template *tmpl,
struct rtattr **tb, struct cryptd_queue *queue) struct rtattr **tb,
struct cryptd_queue *queue)
{ {
struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst; struct crypto_instance *inst;
struct crypto_alg *alg; struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK); CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg)) if (IS_ERR(alg))
return ERR_CAST(alg); return PTR_ERR(alg);
inst = cryptd_alloc_instance(alg, queue); inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
err = PTR_ERR(inst);
if (IS_ERR(inst)) if (IS_ERR(inst))
goto out_put_alg; goto out_put_alg;
ctx = crypto_instance_ctx(inst);
ctx->queue = queue;
err = crypto_init_spawn(&ctx->spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (err)
goto out_free_inst;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type; inst->alg.cra_type = &crypto_ablkcipher_type;
@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
err = crypto_register_instance(tmpl, inst);
if (err) {
crypto_drop_spawn(&ctx->spawn);
out_free_inst:
kfree(inst);
}
out_put_alg: out_put_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
return inst; return err;
} }
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_spawn *spawn = &ictx->spawn; struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_hash *cipher; struct crypto_shash *hash;
cipher = crypto_spawn_hash(spawn); hash = crypto_spawn_shash(spawn);
if (IS_ERR(cipher)) if (IS_ERR(hash))
return PTR_ERR(cipher); return PTR_ERR(hash);
ctx->child = cipher; ctx->child = hash;
tfm->crt_ahash.reqsize = crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct cryptd_hash_request_ctx); sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0; return 0;
} }
@ -350,21 +368,21 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{ {
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_hash(ctx->child); crypto_free_shash(ctx->child);
} }
static int cryptd_hash_setkey(struct crypto_ahash *parent, static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen) const u8 *key, unsigned int keylen)
{ {
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_hash *child = ctx->child; struct crypto_shash *child = ctx->child;
int err; int err;
crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK); CRYPTO_TFM_REQ_MASK);
err = crypto_hash_setkey(child, key, keylen); err = crypto_shash_setkey(child, key, keylen);
crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
CRYPTO_TFM_RES_MASK); CRYPTO_TFM_RES_MASK);
return err; return err;
} }
@ -386,20 +404,18 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
static void cryptd_hash_init(struct crypto_async_request *req_async, int err) static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{ {
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child; struct crypto_shash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async); struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx; struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct hash_desc desc; struct shash_desc *desc = &rctx->desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) if (unlikely(err == -EINPROGRESS))
goto out; goto out;
desc.tfm = child; desc->tfm = child;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->init(&desc); err = crypto_shash_init(desc);
req->base.complete = rctx->complete; req->base.complete = rctx->complete;
@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
static void cryptd_hash_update(struct crypto_async_request *req_async, int err) static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{ {
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async); struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx; struct cryptd_hash_request_ctx *rctx;
struct hash_desc desc;
rctx = ahash_request_ctx(req); rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) if (unlikely(err == -EINPROGRESS))
goto out; goto out;
desc.tfm = child; err = shash_ahash_update(req, &rctx->desc);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->update(&desc,
req->src,
req->nbytes);
req->base.complete = rctx->complete; req->base.complete = rctx->complete;
@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
static void cryptd_hash_final(struct crypto_async_request *req_async, int err) static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{ {
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async); struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx; struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) if (unlikely(err == -EINPROGRESS))
goto out; goto out;
desc.tfm = child; err = crypto_shash_final(&rctx->desc, req->result);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->final(&desc, req->result);
req->base.complete = rctx->complete; req->base.complete = rctx->complete;
@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final); return cryptd_hash_enqueue(req, cryptd_hash_final);
} }
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{ {
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_hash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async); struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx; struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS)) if (unlikely(err == -EINPROGRESS))
goto out; goto out;
desc.tfm = child; err = shash_ahash_finup(req, &rctx->desc);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_crt(child)->digest(&desc, req->base.complete = rctx->complete;
req->src,
req->nbytes, out:
req->result); local_bh_disable();
rctx->complete(&req->base, err);
local_bh_enable();
}
static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
struct crypto_shash *child = ctx->child;
struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
desc->tfm = child;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = shash_ahash_digest(req, desc);
req->base.complete = rctx->complete; req->base.complete = rctx->complete;
@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_digest); return cryptd_hash_enqueue(req, cryptd_hash_digest);
} }
static struct crypto_instance *cryptd_alloc_hash( static int cryptd_hash_export(struct ahash_request *req, void *out)
struct rtattr **tb, struct cryptd_queue *queue)
{ {
struct crypto_instance *inst; struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
return crypto_shash_export(&rctx->desc, out);
}
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
return crypto_shash_import(&rctx->desc, in);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct cryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
struct shash_alg *salg;
struct crypto_alg *alg; struct crypto_alg *alg;
int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, salg = shash_attr_alg(tb[1], 0, 0);
CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(salg))
if (IS_ERR(alg)) return PTR_ERR(salg);
return ERR_PTR(PTR_ERR(alg));
inst = cryptd_alloc_instance(alg, queue); alg = &salg->base;
inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
sizeof(*ctx));
err = PTR_ERR(inst);
if (IS_ERR(inst)) if (IS_ERR(inst))
goto out_put_alg; goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; ctx = ahash_instance_ctx(inst);
inst->alg.cra_type = &crypto_ahash_type; ctx->queue = queue;
inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; err = crypto_init_shash_spawn(&ctx->spawn, salg,
inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); ahash_crypto_instance(inst));
if (err)
goto out_free_inst;
inst->alg.cra_init = cryptd_hash_init_tfm; inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
inst->alg.cra_exit = cryptd_hash_exit_tfm;
inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; inst->alg.halg.digestsize = salg->digestsize;
inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
inst->alg.cra_ahash.setkey = cryptd_hash_setkey; inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;
inst->alg.final = cryptd_hash_final_enqueue;
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
if (err) {
crypto_drop_shash(&ctx->spawn);
out_free_inst:
kfree(inst);
}
out_put_alg: out_put_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
return inst; return err;
} }
static struct cryptd_queue queue; static struct cryptd_queue queue;
static struct crypto_instance *cryptd_alloc(struct rtattr **tb) static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{ {
struct crypto_attr_type *algt; struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb); algt = crypto_get_attr_type(tb);
if (IS_ERR(algt)) if (IS_ERR(algt))
return ERR_CAST(algt); return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER: case CRYPTO_ALG_TYPE_BLKCIPHER:
return cryptd_alloc_blkcipher(tb, &queue); return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST: case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_alloc_hash(tb, &queue); return cryptd_create_hash(tmpl, tb, &queue);
} }
return ERR_PTR(-EINVAL); return -EINVAL;
} }
static void cryptd_free(struct crypto_instance *inst) static void cryptd_free(struct crypto_instance *inst)
{ {
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AHASH:
crypto_drop_shash(&hctx->spawn);
kfree(ahash_instance(inst));
return;
}
crypto_drop_spawn(&ctx->spawn); crypto_drop_spawn(&ctx->spawn);
kfree(inst); kfree(inst);
@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst)
static struct crypto_template cryptd_tmpl = { static struct crypto_template cryptd_tmpl = {
.name = "cryptd", .name = "cryptd",
.alloc = cryptd_alloc, .create = cryptd_create,
.free = cryptd_free, .free = cryptd_free,
.module = THIS_MODULE, .module = THIS_MODULE,
}; };
@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
} }
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct crypto_ahash *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_ahash(tfm);
return ERR_PTR(-EINVAL);
}
return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
static int __init cryptd_init(void) static int __init cryptd_init(void)
{ {
int err; int err;

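The cryptd_alloc_ahash()/cryptd_ahash_child()/cryptd_free_ahash() helpers exported above let a caller wrap a synchronous shash behind an asynchronous cryptd instance while still reaching the underlying transform directly. A minimal usage sketch, assuming only the interfaces visible in this hunk; the function name is hypothetical.

/* Illustrative sketch, not part of the patch. */
#include <crypto/cryptd.h>
#include <crypto/hash.h>
#include <linux/err.h>

static int example_use_cryptd_hash(void)
{
	struct cryptd_ahash *cryptd_tfm;
	struct crypto_shash *child;

	/* Expands to "cryptd(sha1)", which is instantiated by
	 * cryptd_create_hash() above. */
	cryptd_tfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* Direct access to the underlying synchronous shash, e.g. for a
	 * path where queueing through the cryptd workqueue is unnecessary. */
	child = cryptd_ahash_child(cryptd_tfm);
	(void)child;

	cryptd_free_ahash(cryptd_tfm);
	return 0;
}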
View File

@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.geniv = "chainiv";
out: out:
crypto_mod_put(alg); crypto_mod_put(alg);
return inst; return inst;

View File

@ -11,7 +11,10 @@
#include <crypto/gf128mul.h> #include <crypto/gf128mul.h>
#include <crypto/internal/aead.h> #include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h> #include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h> #include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include "internal.h"
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
@ -21,11 +24,12 @@
struct gcm_instance_ctx { struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr; struct crypto_skcipher_spawn ctr;
struct crypto_ahash_spawn ghash;
}; };
struct crypto_gcm_ctx { struct crypto_gcm_ctx {
struct crypto_ablkcipher *ctr; struct crypto_ablkcipher *ctr;
struct gf128mul_4k *gf128; struct crypto_ahash *ghash;
}; };
struct crypto_rfc4106_ctx { struct crypto_rfc4106_ctx {
@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx {
}; };
struct crypto_gcm_ghash_ctx { struct crypto_gcm_ghash_ctx {
u32 bytes; unsigned int cryptlen;
u32 flags; struct scatterlist *src;
struct gf128mul_4k *gf128; crypto_completion_t complete;
u8 buffer[16];
}; };
struct crypto_gcm_req_priv_ctx { struct crypto_gcm_req_priv_ctx {
@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx {
u8 iauth_tag[16]; u8 iauth_tag[16];
struct scatterlist src[2]; struct scatterlist src[2];
struct scatterlist dst[2]; struct scatterlist dst[2];
struct crypto_gcm_ghash_ctx ghash; struct crypto_gcm_ghash_ctx ghash_ctx;
union {
struct ahash_request ahreq;
struct ablkcipher_request abreq; struct ablkcipher_request abreq;
} u;
}; };
struct crypto_gcm_setkey_result { struct crypto_gcm_setkey_result {
@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result {
struct completion completion; struct completion completion;
}; };
static void *gcm_zeroes;
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req) struct aead_request *req)
{ {
@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
} }
static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
struct gf128mul_4k *gf128)
{
ctx->bytes = 0;
ctx->flags = flags;
ctx->gf128 = gf128;
memset(ctx->buffer, 0, 16);
}
static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
const u8 *src, unsigned int srclen)
{
u8 *dst = ctx->buffer;
if (ctx->bytes) {
int n = min(srclen, ctx->bytes);
u8 *pos = dst + (16 - ctx->bytes);
ctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!ctx->bytes)
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
while (srclen >= 16) {
crypto_xor(dst, src, 16);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += 16;
srclen -= 16;
}
if (srclen) {
ctx->bytes = 16 - srclen;
while (srclen--)
*dst++ ^= *src++;
}
}
static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
struct scatterlist *sg, int len)
{
struct scatter_walk walk;
u8 *src;
int n;
if (!len)
return;
scatterwalk_start(&walk, sg);
while (len) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
src = scatterwalk_map(&walk, 0);
crypto_gcm_ghash_update(ctx, src, n);
len -= n;
scatterwalk_unmap(src, 0);
scatterwalk_advance(&walk, n);
scatterwalk_done(&walk, 0, len);
if (len)
crypto_yield(ctx->flags);
}
}
static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
{
u8 *dst = ctx->buffer;
if (ctx->bytes) {
u8 *tmp = dst + (16 - ctx->bytes);
while (ctx->bytes--)
*tmp++ ^= 0;
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
ctx->bytes = 0;
}
static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
unsigned int authlen,
unsigned int cryptlen, u8 *dst)
{
u8 *buf = ctx->buffer;
u128 lengths;
lengths.a = cpu_to_be64(authlen * 8);
lengths.b = cpu_to_be64(cryptlen * 8);
crypto_gcm_ghash_flush(ctx);
crypto_xor(buf, (u8 *)&lengths, 16);
gf128mul_4k_lle((be128 *)buf, ctx->gf128);
crypto_xor(dst, buf, 16);
}
static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{ {
struct crypto_gcm_setkey_result *result = req->data; struct crypto_gcm_setkey_result *result = req->data;
@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ahash *ghash = ctx->ghash;
struct crypto_ablkcipher *ctr = ctx->ctr; struct crypto_ablkcipher *ctr = ctx->ctr;
struct { struct {
be128 hash; be128 hash;
@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err) if (err)
goto out; goto out;
if (ctx->gf128 != NULL) crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
gf128mul_free_4k(ctx->gf128); crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
ctx->gf128 = gf128mul_init_4k_lle(&data->hash); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
if (ctx->gf128 == NULL) CRYPTO_TFM_RES_MASK);
err = -ENOMEM;
out: out:
kfree(data); kfree(data);
@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
u32 flags = req->base.tfm->crt_flags;
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
struct scatterlist *dst; struct scatterlist *dst;
__be32 counter = cpu_to_be32(1); __be32 counter = cpu_to_be32(1);
@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag), cryptlen + sizeof(pctx->auth_tag),
req->iv); req->iv);
crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
crypto_gcm_ghash_flush(ghash);
} }
static int crypto_gcm_hash(struct aead_request *req) static inline unsigned int gcm_remain(unsigned int len)
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(req); len &= 0xfU;
return len ? 16 - len : 0;
}
static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
static int gcm_hash_update(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx,
crypto_completion_t complete,
struct scatterlist *src,
unsigned int len)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
ahash_request_set_callback(ahreq, aead_request_flags(req),
complete, req);
ahash_request_set_crypt(ahreq, src, NULL, len);
return crypto_ahash_update(ahreq);
}
static int gcm_hash_remain(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx,
unsigned int remain,
crypto_completion_t complete)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
ahash_request_set_callback(ahreq, aead_request_flags(req),
complete, req);
sg_init_one(pctx->src, gcm_zeroes, remain);
ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
return crypto_ahash_update(ahreq);
}
static int gcm_hash_len(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
u128 lengths;
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(gctx->cryptlen * 8);
memcpy(pctx->iauth_tag, &lengths, 16);
sg_init_one(pctx->src, pctx->iauth_tag, 16);
ahash_request_set_callback(ahreq, aead_request_flags(req),
gcm_hash_len_done, req);
ahash_request_set_crypt(ahreq, pctx->src,
NULL, sizeof(lengths));
return crypto_ahash_update(ahreq);
}
static int gcm_hash_final(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
ahash_request_set_callback(ahreq, aead_request_flags(req),
gcm_hash_final_done, req);
ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
return crypto_ahash_final(ahreq);
}
static void gcm_hash_final_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
u8 *auth_tag = pctx->auth_tag; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); if (!err)
crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
auth_tag);
gctx->complete(areq, err);
}
static void gcm_hash_len_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
err = gcm_hash_final(req, pctx);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_hash_final_done(areq, err);
}
static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
err = gcm_hash_len(req, pctx);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_hash_len_done(areq, err);
}
static void gcm_hash_crypt_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
if (!err) {
remain = gcm_remain(gctx->cryptlen);
BUG_ON(!remain);
err = gcm_hash_remain(req, pctx, remain,
gcm_hash_crypt_remain_done);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_hash_crypt_remain_done(areq, err);
}
static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_completion_t complete;
unsigned int remain = 0;
if (!err && gctx->cryptlen) {
remain = gcm_remain(gctx->cryptlen);
complete = remain ? gcm_hash_crypt_done :
gcm_hash_crypt_remain_done;
err = gcm_hash_update(req, pctx, complete,
gctx->src, gctx->cryptlen);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
if (remain)
gcm_hash_crypt_done(areq, err);
else
gcm_hash_crypt_remain_done(areq, err);
}
static void gcm_hash_assoc_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
unsigned int remain;
if (!err) {
remain = gcm_remain(req->assoclen);
BUG_ON(!remain);
err = gcm_hash_remain(req, pctx, remain,
gcm_hash_assoc_remain_done);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_hash_assoc_remain_done(areq, err);
}
static void gcm_hash_init_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
crypto_completion_t complete;
unsigned int remain = 0;
if (!err && req->assoclen) {
remain = gcm_remain(req->assoclen);
complete = remain ? gcm_hash_assoc_done :
gcm_hash_assoc_remain_done;
err = gcm_hash_update(req, pctx, complete,
req->assoc, req->assoclen);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
if (remain)
gcm_hash_assoc_done(areq, err);
else
gcm_hash_assoc_remain_done(areq, err);
}
static int gcm_hash(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
unsigned int remain;
crypto_completion_t complete;
int err;
ahash_request_set_tfm(ahreq, ctx->ghash);
ahash_request_set_callback(ahreq, aead_request_flags(req),
gcm_hash_init_done, req);
err = crypto_ahash_init(ahreq);
if (err)
return err;
remain = gcm_remain(req->assoclen);
complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
if (err)
return err;
if (remain) {
err = gcm_hash_remain(req, pctx, remain,
gcm_hash_assoc_remain_done);
if (err)
return err;
}
remain = gcm_remain(gctx->cryptlen);
complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
if (err)
return err;
if (remain) {
err = gcm_hash_remain(req, pctx, remain,
gcm_hash_crypt_remain_done);
if (err)
return err;
}
err = gcm_hash_len(req, pctx);
if (err)
return err;
err = gcm_hash_final(req, pctx);
if (err)
return err;
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0; return 0;
} }
static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) static void gcm_enc_copy_hash(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
}
static void gcm_enc_hash_done(struct crypto_async_request *areq,
int err)
{ {
struct aead_request *req = areq->data; struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) if (!err)
err = crypto_gcm_hash(req); gcm_enc_copy_hash(req, pctx);
aead_request_complete(req, err); aead_request_complete(req, err);
} }
static void gcm_encrypt_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) {
err = gcm_hash(req, pctx);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_enc_hash_done(areq, err);
}
static int crypto_gcm_encrypt(struct aead_request *req) static int crypto_gcm_encrypt(struct aead_request *req)
{ {
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->abreq; struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
int err; int err;
crypto_gcm_init_crypt(abreq, req, req->cryptlen); crypto_gcm_init_crypt(abreq, req, req->cryptlen);
ablkcipher_request_set_callback(abreq, aead_request_flags(req), ablkcipher_request_set_callback(abreq, aead_request_flags(req),
crypto_gcm_encrypt_done, req); gcm_encrypt_done, req);
gctx->src = req->dst;
gctx->cryptlen = req->cryptlen;
gctx->complete = gcm_enc_hash_done;
err = crypto_ablkcipher_encrypt(abreq); err = crypto_ablkcipher_encrypt(abreq);
if (err) if (err)
return err; return err;
return crypto_gcm_hash(req); err = gcm_hash(req, pctx);
if (err)
return err;
crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
gcm_enc_copy_hash(req, pctx);
return 0;
} }
static int crypto_gcm_verify(struct aead_request *req) static int crypto_gcm_verify(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx)
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
u8 *auth_tag = pctx->auth_tag; u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag; u8 *iauth_tag = pctx->iauth_tag;
unsigned int authsize = crypto_aead_authsize(aead); unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize; unsigned int cryptlen = req->cryptlen - authsize;
crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); crypto_xor(auth_tag, iauth_tag, 16);
authsize = crypto_aead_authsize(aead);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
} }
static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{ {
struct aead_request *req = areq->data; struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err) if (!err)
err = crypto_gcm_verify(req); err = crypto_gcm_verify(req, pctx);
aead_request_complete(req, err); aead_request_complete(req, err);
} }
static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
if (!err) {
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
gcm_decrypt_done, req);
crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
err = crypto_ablkcipher_decrypt(abreq);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
gcm_decrypt_done(areq, err);
}
static int crypto_gcm_decrypt(struct aead_request *req) static int crypto_gcm_decrypt(struct aead_request *req)
{ {
struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->abreq; struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int cryptlen = req->cryptlen;
unsigned int authsize = crypto_aead_authsize(aead); unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
int err; int err;
if (cryptlen < authsize) if (cryptlen < authsize)
return -EINVAL; return -EINVAL;
cryptlen -= authsize; cryptlen -= authsize;
crypto_gcm_init_crypt(abreq, req, cryptlen); gctx->src = req->src;
gctx->cryptlen = cryptlen;
gctx->complete = gcm_dec_hash_done;
err = gcm_hash(req, pctx);
if (err)
return err;
ablkcipher_request_set_callback(abreq, aead_request_flags(req), ablkcipher_request_set_callback(abreq, aead_request_flags(req),
crypto_gcm_decrypt_done, req); gcm_decrypt_done, req);
crypto_gcm_init_crypt(abreq, req, cryptlen);
crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
err = crypto_ablkcipher_decrypt(abreq); err = crypto_ablkcipher_decrypt(abreq);
if (err) if (err)
return err; return err;
return crypto_gcm_verify(req); return crypto_gcm_verify(req, pctx);
} }
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *ctr; struct crypto_ablkcipher *ctr;
struct crypto_ahash *ghash;
unsigned long align; unsigned long align;
int err; int err;
ghash = crypto_spawn_ahash(&ictx->ghash);
if (IS_ERR(ghash))
return PTR_ERR(ghash);
ctr = crypto_spawn_skcipher(&ictx->ctr); ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr); err = PTR_ERR(ctr);
if (IS_ERR(ctr)) if (IS_ERR(ctr))
return err; goto err_free_hash;
ctx->ctr = ctr; ctx->ctr = ctr;
ctx->gf128 = NULL; ctx->ghash = ghash;
align = crypto_tfm_alg_alignmask(tfm); align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1); align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = align + tfm->crt_aead.reqsize = align +
sizeof(struct crypto_gcm_req_priv_ctx) + offsetof(struct crypto_gcm_req_priv_ctx, u) +
crypto_ablkcipher_reqsize(ctr); max(sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(ctr),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(ghash));
return 0; return 0;
err_free_hash:
crypto_free_ahash(ghash);
return err;
} }
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->gf128 != NULL) crypto_free_ahash(ctx->ghash);
gf128mul_free_4k(ctx->gf128);
crypto_free_ablkcipher(ctx->ctr); crypto_free_ablkcipher(ctx->ctr);
} }
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
const char *full_name, const char *full_name,
const char *ctr_name) const char *ctr_name,
const char *ghash_name)
{ {
struct crypto_attr_type *algt; struct crypto_attr_type *algt;
struct crypto_instance *inst; struct crypto_instance *inst;
struct crypto_alg *ctr; struct crypto_alg *ctr;
struct crypto_alg *ghash_alg;
struct ahash_alg *ghash_ahash_alg;
struct gcm_instance_ctx *ctx; struct gcm_instance_ctx *ctx;
int err; int err;
@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
err = PTR_ERR(ghash_alg);
if (IS_ERR(ghash_alg))
return ERR_PTR(err);
err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst) if (!inst)
return ERR_PTR(-ENOMEM); goto out_put_ghash;
ctx = crypto_instance_ctx(inst); ctx = crypto_instance_ctx(inst);
ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
inst);
if (err)
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->ctr, inst); crypto_set_skcipher_spawn(&ctx->ctr, inst);
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type, crypto_requires_sync(algt->type,
algt->mask)); algt->mask));
if (err) if (err)
goto err_free_inst; goto err_drop_ghash;
ctr = crypto_skcipher_spawn_alg(&ctx->ctr); ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
err = -ENAMETOOLONG; err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s)", ctr->cra_driver_name) >= "gcm_base(%s,%s)", ctr->cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME) CRYPTO_MAX_ALG_NAME)
goto out_put_ctr; goto out_put_ctr;
@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
out: out:
crypto_mod_put(ghash_alg);
return inst; return inst;
out_put_ctr: out_put_ctr:
crypto_drop_skcipher(&ctx->ctr); crypto_drop_skcipher(&ctx->ctr);
err_drop_ghash:
crypto_drop_ahash(&ctx->ghash);
err_free_inst: err_free_inst:
kfree(inst); kfree(inst);
out_put_ghash:
inst = ERR_PTR(err); inst = ERR_PTR(err);
goto out; goto out;
} }
@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
CRYPTO_MAX_ALG_NAME) CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG); return ERR_PTR(-ENAMETOOLONG);
return crypto_gcm_alloc_common(tb, full_name, ctr_name); return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
} }
static void crypto_gcm_free(struct crypto_instance *inst) static void crypto_gcm_free(struct crypto_instance *inst)
@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst)
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr); crypto_drop_skcipher(&ctx->ctr);
crypto_drop_ahash(&ctx->ghash);
kfree(inst); kfree(inst);
} }
@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
{ {
int err; int err;
const char *ctr_name; const char *ctr_name;
const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME]; char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]); ctr_name = crypto_attr_alg_name(tb[1]);
@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
if (IS_ERR(ctr_name)) if (IS_ERR(ctr_name))
return ERR_PTR(err); return ERR_PTR(err);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", ghash_name = crypto_attr_alg_name(tb[2]);
ctr_name) >= CRYPTO_MAX_ALG_NAME) err = PTR_ERR(ghash_name);
if (IS_ERR(ghash_name))
return ERR_PTR(err);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG); return ERR_PTR(-ENAMETOOLONG);
return crypto_gcm_alloc_common(tb, full_name, ctr_name); return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
} }
static struct crypto_template crypto_gcm_base_tmpl = { static struct crypto_template crypto_gcm_base_tmpl = {
@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void)
{ {
int err; int err;
gcm_zeroes = kzalloc(16, GFP_KERNEL);
if (!gcm_zeroes)
return -ENOMEM;
err = crypto_register_template(&crypto_gcm_base_tmpl); err = crypto_register_template(&crypto_gcm_base_tmpl);
if (err) if (err)
goto out; goto out;
@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void)
if (err) if (err)
goto out_undo_gcm; goto out_undo_gcm;
out: return 0;
return err;
out_undo_gcm: out_undo_gcm:
crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base: out_undo_base:
crypto_unregister_template(&crypto_gcm_base_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl);
goto out; out:
kfree(gcm_zeroes);
return err;
} }
static void __exit crypto_gcm_module_exit(void) static void __exit crypto_gcm_module_exit(void)
{ {
kfree(gcm_zeroes);
crypto_unregister_template(&crypto_rfc4106_tmpl); crypto_unregister_template(&crypto_rfc4106_tmpl);
crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_tmpl);
crypto_unregister_template(&crypto_gcm_base_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl);
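Because the ghash ahash is only ever fed whole 16-byte blocks by gcm_hash(), any partial associated-data or ciphertext block is topped up with bytes from the static gcm_zeroes buffer before the length block is hashed. A small worked example of the padding arithmetic defined in this hunk; the wrapper function is hypothetical and exists only to show the numbers.

/* Illustrative sketch, not part of the patch. */
#include <linux/kernel.h>

/* Same definition as gcm_remain() above: bytes needed to reach the next
 * 16-byte boundary, or 0 if the length is already aligned. */
static inline unsigned int example_gcm_remain(unsigned int len)
{
	len &= 0xfU;
	return len ? 16 - len : 0;
}

static void example_gcm_padding(void)
{
	/* 13 bytes of associated data: gcm_hash_remain() feeds 3 bytes of
	 * gcm_zeroes into the ghash request to stay block aligned. */
	unsigned int r1 = example_gcm_remain(13);	/* == 3 */

	/* 32 bytes of ciphertext: already aligned, so the remain step is
	 * skipped and gcm_hash_len() runs directly. */
	unsigned int r2 = example_gcm_remain(32);	/* == 0 */

	(void)r1;
	(void)r2;
}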

crypto/ghash-generic.c (new file, 170 lines added)
View File

@ -0,0 +1,170 @@
/*
* GHASH: digest algorithm for GCM (Galois/Counter Mode).
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* The algorithm implementation is copied from gcm.c.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
if (!ctx->gf128)
return -ENOMEM;
return 0;
}
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
while (srclen >= GHASH_BLOCK_SIZE) {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
while (srclen--)
*dst++ ^= *src++;
}
return 0;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *dst = dctx->buffer;
if (dctx->bytes) {
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
while (dctx->bytes--)
*tmp++ ^= 0;
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
dctx->bytes = 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
}
static void ghash_exit_tfm(struct crypto_tfm *tfm)
{
struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
.final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
.cra_exit = ghash_exit_tfm,
},
};
static int __init ghash_mod_init(void)
{
return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_mod_exit(void)
{
crypto_unregister_shash(&ghash_alg);
}
module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
MODULE_ALIAS("ghash");
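For completeness, a minimal sketch of driving the new digest directly through the synchronous shash interface; these are standard crypto API calls, the helper name is made up, and error paths are abbreviated.

/* Illustrative sketch, not part of the patch. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_ghash(const u8 key[16], const u8 *data,
			 unsigned int len, u8 out[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ghash_setkey() above rejects anything but a 16-byte key. */
	err = crypto_shash_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* A trailing partial block is zero-padded by ghash_final() via
	 * ghash_flush() above. */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}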

View File

@ -27,7 +27,7 @@
#include <linux/string.h> #include <linux/string.h>
struct hmac_ctx { struct hmac_ctx {
struct crypto_hash *child; struct crypto_shash *hash;
}; };
static inline void *align_ptr(void *p, unsigned int align) static inline void *align_ptr(void *p, unsigned int align)
@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align)
return (void *)ALIGN((unsigned long)p, align); return (void *)ALIGN((unsigned long)p, align);
} }
static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{ {
return align_ptr(crypto_hash_ctx_aligned(tfm) + return align_ptr(crypto_shash_ctx_aligned(tfm) +
crypto_hash_blocksize(tfm) * 2 + crypto_shash_statesize(tfm) * 2,
crypto_hash_digestsize(tfm), sizeof(void *)); crypto_tfm_ctx_alignment());
} }
static int hmac_setkey(struct crypto_hash *parent, static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen) const u8 *inkey, unsigned int keylen)
{ {
int bs = crypto_hash_blocksize(parent); int bs = crypto_shash_blocksize(parent);
int ds = crypto_hash_digestsize(parent); int ds = crypto_shash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent); int ss = crypto_shash_statesize(parent);
char *opad = ipad + bs; char *ipad = crypto_shash_ctx_aligned(parent);
char *digest = opad + bs; char *opad = ipad + ss;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); struct hmac_ctx *ctx = align_ptr(opad + ss,
struct crypto_hash *tfm = ctx->child; crypto_tfm_ctx_alignment());
struct crypto_shash *hash = ctx->hash;
struct {
struct shash_desc shash;
char ctx[crypto_shash_descsize(hash)];
} desc;
unsigned int i; unsigned int i;
desc.shash.tfm = hash;
desc.shash.flags = crypto_shash_get_flags(parent) &
CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) { if (keylen > bs) {
struct hash_desc desc;
struct scatterlist tmp;
int tmplen;
int err; int err;
desc.tfm = tfm; err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
desc.flags = crypto_hash_get_flags(parent);
desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err) if (err)
return err; return err;
tmplen = bs * 2 + ds;
sg_init_one(&tmp, ipad, tmplen);
for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) {
memcpy(ipad, inkey, tmplen);
err = crypto_hash_update(&desc, &tmp, tmplen);
if (err)
return err;
}
if (keylen) {
memcpy(ipad, inkey, keylen);
err = crypto_hash_update(&desc, &tmp, keylen);
if (err)
return err;
}
err = crypto_hash_final(&desc, digest);
if (err)
return err;
inkey = digest;
keylen = ds; keylen = ds;
} } else
memcpy(ipad, inkey, keylen); memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen); memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs); memcpy(opad, ipad, bs);
@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent,
opad[i] ^= 0x5c; opad[i] ^= 0x5c;
} }
return 0; return crypto_shash_init(&desc.shash) ?:
crypto_shash_update(&desc.shash, ipad, bs) ?:
crypto_shash_export(&desc.shash, ipad) ?:
crypto_shash_init(&desc.shash) ?:
crypto_shash_update(&desc.shash, opad, bs) ?:
crypto_shash_export(&desc.shash, opad);
} }
static int hmac_init(struct hash_desc *pdesc) static int hmac_export(struct shash_desc *pdesc, void *out)
{ {
struct crypto_hash *parent = pdesc->tfm; struct shash_desc *desc = shash_desc_ctx(pdesc);
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent);
struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist tmp;
int err;
desc.tfm = ctx->child; desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_init_one(&tmp, ipad, bs);
err = crypto_hash_init(&desc); return crypto_shash_export(desc, out);
if (unlikely(err))
return err;
return crypto_hash_update(&desc, &tmp, bs);
} }
static int hmac_update(struct hash_desc *pdesc, static int hmac_import(struct shash_desc *pdesc, const void *in)
struct scatterlist *sg, unsigned int nbytes)
{ {
struct shash_desc *desc = shash_desc_ctx(pdesc);
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
struct hash_desc desc;
desc.tfm = ctx->child; desc->tfm = ctx->hash;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_hash_update(&desc, sg, nbytes); return crypto_shash_import(desc, in);
} }
static int hmac_final(struct hash_desc *pdesc, u8 *out) static int hmac_init(struct shash_desc *pdesc)
{ {
struct crypto_hash *parent = pdesc->tfm; return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *opad = crypto_hash_ctx_aligned(parent) + bs;
char *digest = opad + bs;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist tmp;
int err;
desc.tfm = ctx->child;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_init_one(&tmp, opad, bs + ds);
err = crypto_hash_final(&desc, digest);
if (unlikely(err))
return err;
return crypto_hash_digest(&desc, &tmp, bs + ds, out);
} }
static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, static int hmac_update(struct shash_desc *pdesc,
const u8 *data, unsigned int nbytes)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_update(desc, data, nbytes);
}
static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_final(desc, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
}
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
unsigned int nbytes, u8 *out) unsigned int nbytes, u8 *out)
{ {
struct crypto_hash *parent = pdesc->tfm;
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent);
char *opad = ipad + bs;
char *digest = opad + bs;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist sg1[2];
struct scatterlist sg2[1];
int err;
desc.tfm = ctx->child; struct crypto_shash *parent = pdesc->tfm;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
sg_init_table(sg1, 2); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_set_buf(sg1, ipad, bs);
scatterwalk_sg_chain(sg1, 2, sg);
sg_init_table(sg2, 1); return crypto_shash_finup(desc, data, nbytes, out) ?:
sg_set_buf(sg2, opad, bs + ds); crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
if (unlikely(err))
return err;
return crypto_hash_digest(&desc, sg2, bs + ds, out);
} }
static int hmac_init_tfm(struct crypto_tfm *tfm) static int hmac_init_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_hash *hash; struct crypto_shash *parent = __crypto_shash_cast(tfm);
struct crypto_shash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); struct hmac_ctx *ctx = hmac_ctx(parent);
hash = crypto_spawn_hash(spawn); hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash)) if (IS_ERR(hash))
return PTR_ERR(hash); return PTR_ERR(hash);
ctx->child = hash; parent->descsize = sizeof(struct shash_desc) +
crypto_shash_descsize(hash);
ctx->hash = hash;
return 0; return 0;
} }
static void hmac_exit_tfm(struct crypto_tfm *tfm) static void hmac_exit_tfm(struct crypto_tfm *tfm)
{ {
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
crypto_free_hash(ctx->child); crypto_free_shash(ctx->hash);
} }
static void hmac_free(struct crypto_instance *inst) static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{ {
crypto_drop_spawn(crypto_instance_ctx(inst)); struct shash_instance *inst;
kfree(inst);
}
static struct crypto_instance *hmac_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg; struct crypto_alg *alg;
struct shash_alg *salg;
int err; int err;
int ds; int ds;
int ss;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err) if (err)
return ERR_PTR(err); return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, salg = shash_attr_alg(tb[1], 0, 0);
CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(salg))
if (IS_ERR(alg)) return PTR_ERR(salg);
return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL); err = -EINVAL;
ds = alg->cra_type == &crypto_hash_type ? ds = salg->digestsize;
alg->cra_hash.digestsize : ss = salg->statesize;
alg->cra_type ? alg = &salg->base;
__crypto_shash_alg(alg)->digestsize : if (ds > alg->cra_blocksize ||
alg->cra_digest.dia_digestsize; ss < alg->cra_blocksize)
if (ds > alg->cra_blocksize)
goto out_put_alg; goto out_put_alg;
inst = crypto_alloc_instance("hmac", alg); inst = shash_alloc_instance("hmac", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst)) if (IS_ERR(inst))
goto out_put_alg; goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
inst->alg.cra_priority = alg->cra_priority; shash_crypto_instance(inst));
inst->alg.cra_blocksize = alg->cra_blocksize; if (err)
inst->alg.cra_alignmask = alg->cra_alignmask; goto out_free_inst;
inst->alg.cra_type = &crypto_hash_type;
inst->alg.cra_hash.digestsize = ds; inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + ss = ALIGN(ss, alg->cra_alignmask + 1);
ALIGN(inst->alg.cra_blocksize * 2 + ds, inst->alg.digestsize = ds;
sizeof(void *)); inst->alg.statesize = ss;
inst->alg.cra_init = hmac_init_tfm; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
inst->alg.cra_exit = hmac_exit_tfm; ALIGN(ss * 2, crypto_tfm_ctx_alignment());
inst->alg.cra_hash.init = hmac_init; inst->alg.base.cra_init = hmac_init_tfm;
inst->alg.cra_hash.update = hmac_update; inst->alg.base.cra_exit = hmac_exit_tfm;
inst->alg.cra_hash.final = hmac_final;
inst->alg.cra_hash.digest = hmac_digest; inst->alg.init = hmac_init;
inst->alg.cra_hash.setkey = hmac_setkey; inst->alg.update = hmac_update;
inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
err = shash_register_instance(tmpl, inst);
if (err) {
out_free_inst:
shash_free_instance(shash_crypto_instance(inst));
}
out_put_alg: out_put_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
return inst; return err;
} }
static struct crypto_template hmac_tmpl = { static struct crypto_template hmac_tmpl = {
.name = "hmac", .name = "hmac",
.alloc = hmac_alloc, .create = hmac_create,
.free = hmac_free, .free = shash_free_instance,
.module = THIS_MODULE, .module = THIS_MODULE,
}; };
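Taken together, the template still computes the textbook construction

	HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m))

but instead of re-hashing the two padded key blocks on every request, hmac_setkey() now runs the underlying shash over K xor ipad and K xor opad once and stores the two exported partial states in the tfm context; hmac_init() merely imports the ipad state, and hmac_final()/hmac_finup() import the opad state before the outer finup. Keys longer than the block size are first reduced to H(K) with a single crypto_shash_digest() call, as before.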

View File

@ -25,12 +25,7 @@
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/fips.h>
#ifdef CONFIG_CRYPTO_FIPS
extern int fips_enabled;
#else
#define fips_enabled 0
#endif
/* Crypto notification events. */ /* Crypto notification events. */
enum { enum {
@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void)
{ } { }
#endif #endif
static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
{
unsigned int len = alg->cra_ctxsize;
if (alg->cra_alignmask) {
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += alg->cra_digest.dia_digestsize;
}
return len;
}
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{ {
return alg->cra_ctxsize; return alg->cra_ctxsize;
@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm); int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm); int crypto_init_compress_ops(struct crypto_tfm *tfm);
void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm); void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm); void crypto_exit_compress_ops(struct crypto_tfm *tfm);
@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask); u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg, void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend); const struct crypto_type *frontend);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
void *crypto_alloc_tfm(const char *alg_name, void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask); const struct crypto_type *frontend, u32 type, u32 mask);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
int crypto_register_notifier(struct notifier_block *nb); int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb); int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v); int crypto_probing_notify(unsigned long val, void *v);

View File

@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0; return 0;
} }
static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
const struct crypto_type *frontend)
{ {
return alg->cra_ctxsize; return alg->cra_ctxsize;
} }
static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
const struct crypto_type *frontend)
{ {
return 0; return 0;
} }

View File

@ -123,4 +123,4 @@ void crypto_put_default_rng(void)
EXPORT_SYMBOL_GPL(crypto_put_default_rng); EXPORT_SYMBOL_GPL(crypto_put_default_rng);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Genertor"); MODULE_DESCRIPTION("Random Number Generator");

View File

@ -25,31 +25,21 @@
#include <crypto/sha.h> #include <crypto/sha.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
struct sha1_ctx {
u64 count;
u32 state[5];
u8 buffer[64];
};
static int sha1_init(struct shash_desc *desc) static int sha1_init(struct shash_desc *desc)
{ {
struct sha1_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *sctx = shash_desc_ctx(desc);
static const struct sha1_ctx initstate = { *sctx = (struct sha1_state){
0, .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
{ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
{ 0, }
}; };
*sctx = initstate;
return 0; return 0;
} }
static int sha1_update(struct shash_desc *desc, const u8 *data, static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
struct sha1_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done; unsigned int partial, done;
const u8 *src; const u8 *src;
@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */ /* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out) static int sha1_final(struct shash_desc *desc, u8 *out)
{ {
struct sha1_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out; __be32 *dst = (__be32 *)out;
u32 i, index, padlen; u32 i, index, padlen;
__be64 bits; __be64 bits;
@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0; return 0;
} }
static int sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
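The new export/import callbacks make it possible to snapshot a partially hashed stream and resume it later, possibly from a different descriptor. A rough sketch, assuming desc, part1/part2 and digest already exist in the caller (names are illustrative):

	struct sha1_state st;
	int err;

	err = crypto_shash_init(desc);
	err = err ?: crypto_shash_update(desc, part1, part1_len);
	err = err ?: crypto_shash_export(desc, &st);	/* snapshot after part1 */

	/* ... later, possibly with a freshly set-up descriptor ... */
	err = err ?: crypto_shash_import(desc, &st);	/* resume from snapshot */
	err = err ?: crypto_shash_finup(desc, part2, part2_len, digest);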
static struct shash_alg alg = { static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE, .digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init, .init = sha1_init,
.update = sha1_update, .update = sha1_update,
.final = sha1_final, .final = sha1_final,
.descsize = sizeof(struct sha1_ctx), .export = sha1_export,
.import = sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = { .base = {
.cra_name = "sha1", .cra_name = "sha1",
.cra_driver_name= "sha1-generic", .cra_driver_name= "sha1-generic",

View File

@ -25,12 +25,6 @@
#include <crypto/sha.h> #include <crypto/sha.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
struct sha256_ctx {
u32 count[2];
u32 state[8];
u8 buf[128];
};
static inline u32 Ch(u32 x, u32 y, u32 z) static inline u32 Ch(u32 x, u32 y, u32 z)
{ {
return z ^ (x & (y ^ z)); return z ^ (x & (y ^ z));
@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input)
static int sha224_init(struct shash_desc *desc) static int sha224_init(struct shash_desc *desc)
{ {
struct sha256_ctx *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA224_H0; sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1; sctx->state[1] = SHA224_H1;
sctx->state[2] = SHA224_H2; sctx->state[2] = SHA224_H2;
@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[5] = SHA224_H5; sctx->state[5] = SHA224_H5;
sctx->state[6] = SHA224_H6; sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7; sctx->state[7] = SHA224_H7;
sctx->count[0] = 0; sctx->count = 0;
sctx->count[1] = 0;
return 0; return 0;
} }
static int sha256_init(struct shash_desc *desc) static int sha256_init(struct shash_desc *desc)
{ {
struct sha256_ctx *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA256_H0; sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1; sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2; sctx->state[2] = SHA256_H2;
@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[5] = SHA256_H5; sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6; sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7; sctx->state[7] = SHA256_H7;
sctx->count[0] = sctx->count[1] = 0; sctx->count = 0;
return 0; return 0;
} }
@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc)
static int sha256_update(struct shash_desc *desc, const u8 *data, static int sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len) unsigned int len)
{ {
struct sha256_ctx *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, part_len; unsigned int partial, done;
const u8 *src;
/* Compute number of bytes mod 128 */ partial = sctx->count & 0x3f;
index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); sctx->count += len;
done = 0;
src = data;
/* Update number of bits */ if ((partial + len) > 63) {
if ((sctx->count[0] += (len << 3)) < (len << 3)) { if (partial) {
sctx->count[1]++; done = -partial;
sctx->count[1] += (len >> 29); memcpy(sctx->buf + partial, data, done + 64);
src = sctx->buf;
} }
part_len = 64 - index; do {
sha256_transform(sctx->state, src);
done += 64;
src = data + done;
} while (done + 63 < len);
/* Transform as many times as possible. */ partial = 0;
if (len >= part_len) {
memcpy(&sctx->buf[index], data, part_len);
sha256_transform(sctx->state, sctx->buf);
for (i = part_len; i + 63 < len; i += 64)
sha256_transform(sctx->state, &data[i]);
index = 0;
} else {
i = 0;
} }
memcpy(sctx->buf + partial, src, len - done);
/* Buffer remaining input */
memcpy(&sctx->buf[index], &data[i], len-i);
return 0; return 0;
} }
static int sha256_final(struct shash_desc *desc, u8 *out) static int sha256_final(struct shash_desc *desc, u8 *out)
{ {
struct sha256_ctx *sctx = shash_desc_ctx(desc); struct sha256_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out; __be32 *dst = (__be32 *)out;
__be32 bits[2]; __be64 bits;
unsigned int index, pad_len; unsigned int index, pad_len;
int i; int i;
static const u8 padding[64] = { 0x80, }; static const u8 padding[64] = { 0x80, };
/* Save number of bits */ /* Save number of bits */
bits[1] = cpu_to_be32(sctx->count[0]); bits = cpu_to_be64(sctx->count << 3);
bits[0] = cpu_to_be32(sctx->count[1]);
/* Pad out to 56 mod 64. */ /* Pad out to 56 mod 64. */
index = (sctx->count[0] >> 3) & 0x3f; index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index); pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(desc, padding, pad_len); sha256_update(desc, padding, pad_len);
/* Append length (before padding) */ /* Append length (before padding) */
sha256_update(desc, (const u8 *)bits, sizeof(bits)); sha256_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */ /* Store state in digest */
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
return 0; return 0;
} }
static int sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg sha256 = { static struct shash_alg sha256 = {
.digestsize = SHA256_DIGEST_SIZE, .digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init, .init = sha256_init,
.update = sha256_update, .update = sha256_update,
.final = sha256_final, .final = sha256_final,
.descsize = sizeof(struct sha256_ctx), .export = sha256_export,
.import = sha256_import,
.descsize = sizeof(struct sha256_state),
.statesize = sizeof(struct sha256_state),
.base = { .base = {
.cra_name = "sha256", .cra_name = "sha256",
.cra_driver_name= "sha256-generic", .cra_driver_name= "sha256-generic",
@ -351,7 +359,7 @@ static struct shash_alg sha224 = {
.init = sha224_init, .init = sha224_init,
.update = sha256_update, .update = sha256_update,
.final = sha224_final, .final = sha224_final,
.descsize = sizeof(struct sha256_ctx), .descsize = sizeof(struct sha256_state),
.base = { .base = {
.cra_name = "sha224", .cra_name = "sha224",
.cra_driver_name= "sha224-generic", .cra_driver_name= "sha224-generic",

View File

@ -21,12 +21,6 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
struct sha512_ctx {
u64 state[8];
u32 count[4];
u8 buf[128];
};
static DEFINE_PER_CPU(u64[80], msg_schedule); static DEFINE_PER_CPU(u64[80], msg_schedule);
static inline u64 Ch(u64 x, u64 y, u64 z) static inline u64 Ch(u64 x, u64 y, u64 z)
@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input)
static int static int
sha512_init(struct shash_desc *desc) sha512_init(struct shash_desc *desc)
{ {
struct sha512_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0; sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1; sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2; sctx->state[2] = SHA512_H2;
@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc)
sctx->state[5] = SHA512_H5; sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6; sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7; sctx->state[7] = SHA512_H7;
sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; sctx->count[0] = sctx->count[1] = 0;
return 0; return 0;
} }
@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc)
static int static int
sha384_init(struct shash_desc *desc) sha384_init(struct shash_desc *desc)
{ {
struct sha512_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA384_H0; sctx->state[0] = SHA384_H0;
sctx->state[1] = SHA384_H1; sctx->state[1] = SHA384_H1;
sctx->state[2] = SHA384_H2; sctx->state[2] = SHA384_H2;
@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc)
sctx->state[5] = SHA384_H5; sctx->state[5] = SHA384_H5;
sctx->state[6] = SHA384_H6; sctx->state[6] = SHA384_H6;
sctx->state[7] = SHA384_H7; sctx->state[7] = SHA384_H7;
sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; sctx->count[0] = sctx->count[1] = 0;
return 0; return 0;
} }
@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc)
static int static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{ {
struct sha512_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, part_len; unsigned int i, index, part_len;
/* Compute number of bytes mod 128 */ /* Compute number of bytes mod 128 */
index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); index = sctx->count[0] & 0x7f;
/* Update number of bits */ /* Update number of bytes */
if ((sctx->count[0] += (len << 3)) < (len << 3)) { if (!(sctx->count[0] += len))
if ((sctx->count[1] += 1) < 1) sctx->count[1]++;
if ((sctx->count[2] += 1) < 1)
sctx->count[3]++;
sctx->count[1] += (len >> 29);
}
part_len = 128 - index; part_len = 128 - index;
@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int static int
sha512_final(struct shash_desc *desc, u8 *hash) sha512_final(struct shash_desc *desc, u8 *hash)
{ {
struct sha512_ctx *sctx = shash_desc_ctx(desc); struct sha512_state *sctx = shash_desc_ctx(desc);
static u8 padding[128] = { 0x80, }; static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash; __be64 *dst = (__be64 *)hash;
__be32 bits[4]; __be64 bits[2];
unsigned int index, pad_len; unsigned int index, pad_len;
int i; int i;
/* Save number of bits */ /* Save number of bits */
bits[3] = cpu_to_be32(sctx->count[0]); bits[1] = cpu_to_be64(sctx->count[0] << 3);
bits[2] = cpu_to_be32(sctx->count[1]); bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
bits[1] = cpu_to_be32(sctx->count[2]);
bits[0] = cpu_to_be32(sctx->count[3]);
/* Pad out to 112 mod 128. */ /* Pad out to 112 mod 128. */
index = (sctx->count[0] >> 3) & 0x7f; index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index); pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
sha512_update(desc, padding, pad_len); sha512_update(desc, padding, pad_len);
@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash)
dst[i] = cpu_to_be64(sctx->state[i]); dst[i] = cpu_to_be64(sctx->state[i]);
/* Zeroize sensitive information. */ /* Zeroize sensitive information. */
memset(sctx, 0, sizeof(struct sha512_ctx)); memset(sctx, 0, sizeof(struct sha512_state));
return 0; return 0;
} }
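The byte counter now lives in two u64 words (count[0] low, count[1] high), so the 128-bit bit length appended here is simply that 128-bit byte count shifted left by three. A quick non-kernel check of the split used above, assuming a compiler that provides GCC's unsigned __int128 (illustrative only):

	static int sha512_bits_split_ok(u64 lo_bytes, u64 hi_bytes)
	{
		unsigned __int128 total_bits =
			(((unsigned __int128)hi_bytes << 64) | lo_bytes) << 3;
		u64 hi = hi_bytes << 3 | lo_bytes >> 61;	/* bits[0] above */
		u64 lo = lo_bytes << 3;				/* bits[1] above */

		return hi == (u64)(total_bits >> 64) && lo == (u64)total_bits;
	}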
@ -262,7 +250,7 @@ static struct shash_alg sha512 = {
.init = sha512_init, .init = sha512_init,
.update = sha512_update, .update = sha512_update,
.final = sha512_final, .final = sha512_final,
.descsize = sizeof(struct sha512_ctx), .descsize = sizeof(struct sha512_state),
.base = { .base = {
.cra_name = "sha512", .cra_name = "sha512",
.cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_flags = CRYPTO_ALG_TYPE_SHASH,
@ -276,7 +264,7 @@ static struct shash_alg sha384 = {
.init = sha384_init, .init = sha384_init,
.update = sha512_update, .update = sha512_update,
.final = sha384_final, .final = sha384_final,
.descsize = sizeof(struct sha512_ctx), .descsize = sizeof(struct sha512_state),
.base = { .base = {
.cra_name = "sha384", .cra_name = "sha384",
.cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_flags = CRYPTO_ALG_TYPE_SHASH,

View File

@ -22,6 +22,12 @@
static const struct crypto_type crypto_shash_type; static const struct crypto_type crypto_shash_type;
static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen); memcpy(alignbuffer, key, keylen);
err = shash->setkey(tfm, alignbuffer, keylen); err = shash->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen); kzfree(buffer);
kfree(buffer);
return err; return err;
} }
@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
struct shash_alg *shash = crypto_shash_alg(tfm); struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm);
if (!shash->setkey)
return -ENOSYS;
if ((unsigned long)key & alignmask) if ((unsigned long)key & alignmask)
return shash_setkey_unaligned(tfm, key, keylen); return shash_setkey_unaligned(tfm, key, keylen);
@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 - unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask); ((unsigned long)data & alignmask);
u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
__attribute__ ((aligned)); __attribute__ ((aligned));
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
if (unaligned_len > len) if (unaligned_len > len)
unaligned_len = len; unaligned_len = len;
memcpy(buf, data, unaligned_len); memcpy(buf, data, unaligned_len);
err = shash->update(desc, buf, unaligned_len);
memset(buf, 0, unaligned_len);
return shash->update(desc, buf, unaligned_len) ?: return err ?:
shash->update(desc, data + unaligned_len, len - unaligned_len); shash->update(desc, data + unaligned_len, len - unaligned_len);
} }
@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm); struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm); unsigned int ds = crypto_shash_digestsize(tfm);
u8 buf[shash_align_buffer_size(ds, alignmask)] u8 ubuf[shash_align_buffer_size(ds, alignmask)]
__attribute__ ((aligned)); __attribute__ ((aligned));
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err; int err;
err = shash->final(desc, buf); err = shash->final(desc, buf);
if (err)
goto out;
memcpy(out, buf, ds); memcpy(out, buf, ds);
out:
memset(buf, 0, ds);
return err; return err;
} }
@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm); struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm);
if (((unsigned long)data | (unsigned long)out) & alignmask || if (((unsigned long)data | (unsigned long)out) & alignmask)
!shash->finup)
return shash_finup_unaligned(desc, data, len, out); return shash_finup_unaligned(desc, data, len, out);
return shash->finup(desc, data, len, out); return shash->finup(desc, data, len, out);
@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out) unsigned int len, u8 *out)
{ {
return crypto_shash_init(desc) ?: return crypto_shash_init(desc) ?:
crypto_shash_update(desc, data, len) ?: crypto_shash_finup(desc, data, len, out);
crypto_shash_final(desc, out);
} }
int crypto_shash_digest(struct shash_desc *desc, const u8 *data, int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm); struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm);
if (((unsigned long)data | (unsigned long)out) & alignmask || if (((unsigned long)data | (unsigned long)out) & alignmask)
!shash->digest)
return shash_digest_unaligned(desc, data, len, out); return shash_digest_unaligned(desc, data, len, out);
return shash->digest(desc, data, len, out); return shash->digest(desc, data, len, out);
} }
EXPORT_SYMBOL_GPL(crypto_shash_digest); EXPORT_SYMBOL_GPL(crypto_shash_digest);
int crypto_shash_import(struct shash_desc *desc, const u8 *in) static int shash_default_export(struct shash_desc *desc, void *out)
{ {
struct crypto_shash *tfm = desc->tfm; memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
struct shash_alg *alg = crypto_shash_alg(tfm); return 0;
}
memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
static int shash_default_import(struct shash_desc *desc, const void *in)
if (alg->reinit) {
alg->reinit(desc); memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(crypto_shash_import);
static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen) unsigned int keylen)
@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req)
return crypto_shash_init(desc); return crypto_shash_init(desc);
} }
static int shash_async_update(struct ahash_request *req) int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{ {
struct shash_desc *desc = ahash_request_ctx(req);
struct crypto_hash_walk walk; struct crypto_hash_walk walk;
int nbytes; int nbytes;
@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req)
return nbytes; return nbytes;
} }
EXPORT_SYMBOL_GPL(shash_ahash_update);
static int shash_async_update(struct ahash_request *req)
{
return shash_ahash_update(req, ahash_request_ctx(req));
}
static int shash_async_final(struct ahash_request *req) static int shash_async_final(struct ahash_request *req)
{ {
return crypto_shash_final(ahash_request_ctx(req), req->result); return crypto_shash_final(ahash_request_ctx(req), req->result);
} }
static int shash_async_digest(struct ahash_request *req) int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
struct crypto_hash_walk walk;
int nbytes;
nbytes = crypto_hash_walk_first(req, &walk);
if (!nbytes)
return crypto_shash_final(desc, req->result);
do {
nbytes = crypto_hash_walk_last(&walk) ?
crypto_shash_finup(desc, walk.data, nbytes,
req->result) :
crypto_shash_update(desc, walk.data, nbytes);
nbytes = crypto_hash_walk_done(&walk, nbytes);
} while (nbytes > 0);
return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);
static int shash_async_finup(struct ahash_request *req)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
desc->flags = req->base.flags;
return shash_ahash_finup(req, desc);
}
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{ {
struct scatterlist *sg = req->src; struct scatterlist *sg = req->src;
unsigned int offset = sg->offset; unsigned int offset = sg->offset;
@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req)
int err; int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
struct crypto_shash **ctx =
crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
void *data; void *data;
desc->tfm = *ctx;
desc->flags = req->base.flags;
data = crypto_kmap(sg_page(sg), 0); data = crypto_kmap(sg_page(sg), 0);
err = crypto_shash_digest(desc, data + offset, nbytes, err = crypto_shash_digest(desc, data + offset, nbytes,
req->result); req->result);
crypto_kunmap(data, 0); crypto_kunmap(data, 0);
crypto_yield(desc->flags); crypto_yield(desc->flags);
goto out; } else
err = crypto_shash_init(desc) ?:
shash_ahash_finup(req, desc);
return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
static int shash_async_digest(struct ahash_request *req)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
} }
err = shash_async_init(req); static int shash_async_export(struct ahash_request *req, void *out)
if (err) {
goto out; return crypto_shash_export(ahash_request_ctx(req), out);
}
err = shash_async_update(req); static int shash_async_import(struct ahash_request *req, const void *in)
if (err) {
goto out; return crypto_shash_import(ahash_request_ctx(req), in);
err = shash_async_final(req);
out:
return err;
} }
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
crypto_free_shash(*ctx); crypto_free_shash(*ctx);
} }
static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{ {
struct crypto_alg *calg = tfm->__crt_alg; struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg); struct shash_alg *alg = __crypto_shash_alg(calg);
struct ahash_tfm *crt = &tfm->crt_ahash; struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash; struct crypto_shash *shash;
@ -292,10 +343,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->init = shash_async_init; crt->init = shash_async_init;
crt->update = shash_async_update; crt->update = shash_async_update;
crt->final = shash_async_final; crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest; crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
crt->digestsize = alg->digestsize; if (alg->setkey)
crt->setkey = shash_async_setkey;
if (alg->export)
crt->export = shash_async_export;
if (alg->import)
crt->import = shash_async_import;
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0; return 0;
@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen) unsigned int keylen)
{ {
struct shash_desc *desc = crypto_hash_ctx(tfm); struct shash_desc **descp = crypto_hash_ctx(tfm);
struct shash_desc *desc = *descp;
return crypto_shash_setkey(desc->tfm, key, keylen); return crypto_shash_setkey(desc->tfm, key, keylen);
} }
static int shash_compat_init(struct hash_desc *hdesc) static int shash_compat_init(struct hash_desc *hdesc)
{ {
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
struct shash_desc *desc = *descp;
desc->flags = hdesc->flags; desc->flags = hdesc->flags;
@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc)
static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
unsigned int len) unsigned int len)
{ {
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
struct shash_desc *desc = *descp;
struct crypto_hash_walk walk; struct crypto_hash_walk walk;
int nbytes; int nbytes;
@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
static int shash_compat_final(struct hash_desc *hdesc, u8 *out) static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{ {
return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
return crypto_shash_final(*descp, out);
} }
static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
int err; int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
struct shash_desc *desc = *descp;
void *data; void *data;
desc->flags = hdesc->flags; desc->flags = hdesc->flags;
@ -372,9 +435,11 @@ out:
static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
{ {
struct shash_desc *desc= crypto_tfm_ctx(tfm); struct shash_desc **descp = crypto_tfm_ctx(tfm);
struct shash_desc *desc = *descp;
crypto_free_shash(desc->tfm); crypto_free_shash(desc->tfm);
kzfree(desc);
} }
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct hash_tfm *crt = &tfm->crt_hash; struct hash_tfm *crt = &tfm->crt_hash;
struct crypto_alg *calg = tfm->__crt_alg; struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg); struct shash_alg *alg = __crypto_shash_alg(calg);
struct shash_desc *desc = crypto_tfm_ctx(tfm); struct shash_desc **descp = crypto_tfm_ctx(tfm);
struct crypto_shash *shash; struct crypto_shash *shash;
struct shash_desc *desc;
if (!crypto_mod_get(calg)) if (!crypto_mod_get(calg))
return -EAGAIN; return -EAGAIN;
@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
return PTR_ERR(shash); return PTR_ERR(shash);
} }
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
GFP_KERNEL);
if (!desc) {
crypto_free_shash(shash);
return -ENOMEM;
}
*descp = desc;
desc->tfm = shash; desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat; tfm->exit = crypto_exit_shash_ops_compat;
@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (mask & CRYPTO_ALG_TYPE_MASK) { switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK: case CRYPTO_ALG_TYPE_HASH_MASK:
return crypto_init_shash_ops_compat(tfm); return crypto_init_shash_ops_compat(tfm);
case CRYPTO_ALG_TYPE_AHASH_MASK:
return crypto_init_shash_ops_async(tfm);
} }
return -EINVAL; return -EINVAL;
@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask) u32 mask)
{ {
struct shash_alg *salg = __crypto_shash_alg(alg);
switch (mask & CRYPTO_ALG_TYPE_MASK) { switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK: case CRYPTO_ALG_TYPE_HASH_MASK:
return sizeof(struct shash_desc) + salg->descsize; return sizeof(struct shash_desc *);
case CRYPTO_ALG_TYPE_AHASH_MASK:
return sizeof(struct crypto_shash *);
} }
return 0; return 0;
} }
static int crypto_shash_init_tfm(struct crypto_tfm *tfm, static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
const struct crypto_type *frontend)
{ {
struct crypto_shash *hash = __crypto_shash_cast(tfm);
hash->descsize = crypto_shash_alg(hash)->descsize;
return 0; return 0;
} }
static unsigned int crypto_shash_extsize(struct crypto_alg *alg, static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
const struct crypto_type *frontend)
{ {
return alg->cra_ctxsize; return alg->cra_ctxsize;
} }
@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : shash\n"); seq_printf(m, "type : shash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", salg->digestsize); seq_printf(m, "digestsize : %u\n", salg->digestsize);
seq_printf(m, "descsize : %u\n", salg->descsize);
} }
static const struct crypto_type crypto_shash_type = { static const struct crypto_type crypto_shash_type = {
@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
} }
EXPORT_SYMBOL_GPL(crypto_alloc_shash); EXPORT_SYMBOL_GPL(crypto_alloc_shash);
int crypto_register_shash(struct shash_alg *alg) static int shash_prepare_alg(struct shash_alg *alg)
{ {
struct crypto_alg *base = &alg->base; struct crypto_alg *base = &alg->base;
if (alg->digestsize > PAGE_SIZE / 8 || if (alg->digestsize > PAGE_SIZE / 8 ||
alg->descsize > PAGE_SIZE / 8) alg->descsize > PAGE_SIZE / 8 ||
alg->statesize > PAGE_SIZE / 8)
return -EINVAL; return -EINVAL;
base->cra_type = &crypto_shash_type; base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
if (!alg->finup)
alg->finup = shash_finup_unaligned;
if (!alg->digest)
alg->digest = shash_digest_unaligned;
if (!alg->export) {
alg->export = shash_default_export;
alg->import = shash_default_import;
alg->statesize = alg->descsize;
}
if (!alg->setkey)
alg->setkey = shash_no_setkey;
return 0;
}
int crypto_register_shash(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
int err;
err = shash_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base); return crypto_register_alg(base);
} }
EXPORT_SYMBOL_GPL(crypto_register_shash); EXPORT_SYMBOL_GPL(crypto_register_shash);
@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg)
} }
EXPORT_SYMBOL_GPL(crypto_unregister_shash); EXPORT_SYMBOL_GPL(crypto_unregister_shash);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst)
{
int err;
err = shash_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, shash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_register_instance);
void shash_free_instance(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(shash_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_free_instance);
int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
struct shash_alg *alg,
struct crypto_instance *inst)
{
return crypto_init_spawn2(&spawn->base, &alg->base, inst,
&crypto_shash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
struct crypto_alg *alg;
alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
return IS_ERR(alg) ? ERR_CAST(alg) :
container_of(alg, struct shash_alg, base);
}
EXPORT_SYMBOL_GPL(shash_attr_alg);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type"); MODULE_DESCRIPTION("Synchronous cryptographic hash type");

View File

@ -45,6 +45,9 @@
*/ */
static unsigned int sec; static unsigned int sec;
static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode; static int mode;
static char *tvmem[TVMEMSIZE]; static char *tvmem[TVMEMSIZE];
@ -716,6 +719,10 @@ static int do_test(int m)
ret += tcrypt_test("hmac(rmd160)"); ret += tcrypt_test("hmac(rmd160)");
break; break;
case 109:
ret += tcrypt_test("vmac(aes)");
break;
case 150: case 150:
ret += tcrypt_test("ansi_cprng"); ret += tcrypt_test("ansi_cprng");
break; break;
@ -885,6 +892,12 @@ static int do_test(int m)
return ret; return ret;
} }
static int do_alg_test(const char *alg, u32 type, u32 mask)
{
return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
0 : -ENOENT;
}
static int __init tcrypt_mod_init(void) static int __init tcrypt_mod_init(void)
{ {
int err = -ENOMEM; int err = -ENOMEM;
@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv; goto err_free_tv;
} }
if (alg)
err = do_alg_test(alg, type, mask);
else
err = do_test(mode); err = do_test(mode);
if (err) { if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n"); printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv; goto err_free_tv;
@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init); module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini); module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);
module_param(type, uint, 0);
module_param(mask, uint, 0);
module_param(mode, int, 0); module_param(mode, int, 0);
module_param(sec, uint, 0); module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests " MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
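With the new alg/type/mask parameters the module can be asked about a single algorithm instead of running one of the numbered test modes: loading it with something like alg="vmac(aes)" (type and mask left at their default 0) makes tcrypt_mod_init() take the do_alg_test() path, which succeeds only if crypto_has_alg() can find or instantiate the named algorithm.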

View File

@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
hash_buff = xbuf[0]; hash_buff = xbuf[0];
ret = -EINVAL;
if (WARN_ON(template[i].psize > PAGE_SIZE))
goto out;
memcpy(hash_buff, template[i].plaintext, template[i].psize); memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize); sg_init_one(&sg[0], hash_buff, template[i].psize);
@ -2251,6 +2247,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.count = TGR192_TEST_VECTORS .count = TGR192_TEST_VECTORS
} }
} }
}, {
.alg = "vmac(aes)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = aes_vmac128_tv_template,
.count = VMAC_AES_TEST_VECTORS
}
}
}, { }, {
.alg = "wp256", .alg = "wp256",
.test = alg_test_hash, .test = alg_test_hash,
@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask) int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{ {
int i; int i;
int j;
int rc; int rc;
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
} }
i = alg_find_test(alg); i = alg_find_test(alg);
if (i < 0) j = alg_find_test(driver);
if (i < 0 && j < 0)
goto notest; goto notest;
if (fips_enabled && !alg_test_descs[i].fips_allowed) if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
(j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg; goto non_fips_alg;
rc = alg_test_descs[i].test(alg_test_descs + i, driver, rc = 0;
if (i >= 0)
rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
type, mask); type, mask);
if (j >= 0)
rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
type, mask);
test_done: test_done:
if (fips_enabled && rc) if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg); panic("%s: %s alg self test failed in fips mode!\n", driver, alg);

View File

@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
} }
}; };
#define VMAC_AES_TEST_VECTORS 1
static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
static struct hash_testvec aes_vmac128_tv_template[] = {
{
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
.plaintext = vmac_string,
.digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
.psize = 128,
.ksize = 16,
},
};
/* /*
* SHA384 HMAC test vectors from RFC4231 * SHA384 HMAC test vectors from RFC4231
*/ */

crypto/vmac.c (new file, 678 lines)
View File

@ -0,0 +1,678 @@
/*
* Modified to interface to the Linux kernel
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
/* --------------------------------------------------------------------------
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Please send bug reports to the authors.
* Last modified: 17 APR 08, 1700 PDT
* ----------------------------------------------------------------------- */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif
/*
* The following routines are used in this implementation. They are
* written via macros to simulate zero-overhead call-by-reference.
*
* MUL64: 64x64->128-bit multiplication
* PMUL64: assumes top bits cleared on inputs
* ADD128: 128x128->128-bit addition
*/
#define ADD128(rh, rl, ih, il) \
do { \
u64 _il = (il); \
(rl) += (_il); \
if ((rl) < (_il)) \
(rh)++; \
(rh) += (ih); \
} while (0)
#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m >> 32), (m << 32)); \
} while (0)
#define MUL64(rh, rl, i1, i2) \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m1 = MUL32(_i1, _i2>>32); \
u64 m2 = MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
} while (0)
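These macros assemble the full 64x64 -> 128-bit product from four 32x32 multiplies plus carries. A quick non-kernel sanity check, assuming a compiler that offers GCC's unsigned __int128 (illustrative only):

	static int mul64_selftest(u64 a, u64 b)
	{
		u64 rh, rl;
		unsigned __int128 ref = (unsigned __int128)a * b;

		MUL64(rh, rl, a, b);
		return rh == (u64)(ref >> 64) && rl == (u64)ref;
	}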
/*
* For highest performance the L1 NH and L2 polynomial hashes should be
* carefully implemented to take advantage of one's target architecture.
* Here these two hash functions are defined multiple times; once for
* 64-bit architectures, once for 32-bit SSE2 architectures, and once
* for the rest (32-bit) architectures.
* For each, nh_16 *must* be defined (works on multiples of 16 bytes).
* Optionally, nh_vmac_nhbytes can be defined (for multiples of
* VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
* NH computations at once).
*/
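Concretely, the 16-byte-block NH pass adds key and message words modulo 2^64, multiplies the resulting pairs out to 128 bits and sums the products modulo 2^128. A compact non-kernel reference for the same computation, assuming unsigned __int128 and message words already converted from little endian (illustrative only):

	static unsigned __int128 nh_16_ref(const u64 *m, const u64 *k, int nw)
	{
		unsigned __int128 sum = 0;
		int i;

		for (i = 0; i < nw; i += 2)	/* nw: even count of 64-bit words */
			sum += (unsigned __int128)(m[i] + k[i]) * (m[i + 1] + k[i + 1]);
		return sum;
	}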
#ifdef CONFIG_64BIT
#define nh_16(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#endif
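/*
 * poly_step is one step of the L2 polynomial hash: the accumulator (ah,al)
 * is multiplied by the key (kh,kl) and the new NH output (mh,ml) is added,
 * reduced modulo p127 = 2^127 - 1 (the masking with m63 keeps the high
 * word below 2^63).
 */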
#define poly_step(ah, al, kh, kl, mh, ml) \
do { \
u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
/* compute ab*cd, put bd into result registers */ \
PMUL64(t3h, t3l, al, kh); \
PMUL64(t2h, t2l, ah, kl); \
PMUL64(t1h, t1l, ah, 2*kh); \
PMUL64(ah, al, al, kl); \
/* add 2 * ac to result */ \
ADD128(ah, al, t1h, t1l); \
/* add together ad + bc */ \
ADD128(t2h, t2l, t3h, t3l); \
/* now (ah,al), (t2l,2*t2h) need summing */ \
/* first add the high registers, carrying into t2h */ \
ADD128(t2h, ah, z, t2l); \
/* double t2h and add top bit of ah */ \
t2h = 2 * t2h + (ah >> 63); \
ah &= m63; \
/* now add the low registers */ \
ADD128(ah, al, mh, ml); \
ADD128(ah, al, z, t2h); \
} while (0)
#else /* ! CONFIG_64BIT */
#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl) \
do { \
u64 t1, t2, m1, m2, t; \
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
t1 = le64_to_cpup(mp+i) + kp[i]; \
t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
MUL32(t1, t2)); \
rh += (u64)(u32)(m1 >> 32) \
+ (u32)(m2 >> 32); \
t += (u64)(u32)m1 + (u32)m2; \
} \
ADD128(rh, rl, (t >> 32), (t << 32)); \
} while (0)
#endif
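/*
 * 32-bit variant of the polynomial step: the same a = a*k + m computation
 * as the 64-bit poly_step above, but carried out on 32-bit halves
 * (a0..a3, k0..k3) so that only 32x32->64-bit multiplies are needed.
 */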
static void poly_step_func(u64 *ahi, u64 *alo,
const u64 *kh, const u64 *kl,
const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))
u64 p, q, t;
u32 t2;
p = MUL32(a3, k3);
p += p;
p += *(u64 *)mh;
p += MUL32(a0, k2);
p += MUL32(a1, k1);
p += MUL32(a2, k0);
t = (u32)(p);
p >>= 32;
p += MUL32(a0, k3);
p += MUL32(a1, k2);
p += MUL32(a2, k1);
p += MUL32(a3, k0);
t |= ((u64)((u32)p & 0x7fffffff)) << 32;
p >>= 31;
p += (u64)(((u32 *)ml)[INDEX_LOW]);
p += MUL32(a0, k0);
q = MUL32(a1, k3);
q += MUL32(a2, k2);
q += MUL32(a3, k1);
q += q;
p += q;
t2 = (u32)(p);
p >>= 32;
p += (u64)(((u32 *)ml)[INDEX_HIGH]);
p += MUL32(a0, k1);
p += MUL32(a1, k0);
q = MUL32(a2, k3);
q += MUL32(a3, k2);
q += q;
p += q;
*(u64 *)(alo) = (p << 32) | t2;
p >>= 32;
*(u64 *)(ahi) = p + t;
#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}
#define poly_step(ah, al, kh, kl, mh, ml) \
poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
#endif /* end of specialized NH and poly definitions */
/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_16(mp, kp, nw, rh, rl); \
nh_16(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
static void vhash_abort(struct vmac_ctx *ctx)
{
ctx->polytmp[0] = ctx->polykey[0];
ctx->polytmp[1] = ctx->polykey[1];
ctx->first_block_processed = 0;
}
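/*
 * l3hash: final compression stage. The 128-bit polynomial accumulator
 * (p1,p2), plus the bit length of the trailing partial block, is reduced
 * modulo p127 and then hashed down to 64 bits with the L3 key pair
 * (k1,k2); the "& 257" corrections implement the reductions modulo
 * p64 = 2^64 - 257.
 */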
static u64 l3hash(u64 p1, u64 p2,
u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
/* fully reduce (p1,p2)+(len,0) mod p127 */
t = p1 >> 63;
p1 &= m63;
ADD128(p1, p2, len, t);
/* At this point, (p1,p2) is at most 2^127+(len<<64) */
t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
ADD128(p1, p2, z, t);
p1 &= m63;
/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
t = p1 + (p2 >> 32);
t += (t >> 32);
t += (u32)t > 0xfffffffeu;
p1 += (t >> 32);
p2 += (p1 << 32);
/* compute (p1+k1)%p64 and (p2+k2)%p64 */
p1 += k1;
p1 += (0 - (p1 < k1)) & 257;
p2 += k2;
p2 += (0 - (p2 < k2)) & 257;
/* compute (p1+k1)*(p2+k2)%p64 */
MUL64(rh, rl, p1, p2);
t = rh >> 56;
ADD128(t, rl, z, rh);
rh <<= 8;
ADD128(t, rl, z, rh);
t += t << 8;
rl += t;
rl += (0 - (rl < t)) & 257;
rl += (0 - (rl > p64-1)) & 257;
return rl;
}
static void vhash_update(const unsigned char *m,
unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
struct vmac_ctx *ctx)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
if (!ctx->first_block_processed) {
ctx->first_block_processed = 1;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
}
while (i--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
ctx->polytmp[0] = ch;
ctx->polytmp[1] = cl;
}
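/*
 * vhash: process the final (possibly partial) piece of the message, fold
 * in any state buffered by vhash_update and run the L3 output stage.
 * The tagl argument is unused by this 64-bit tag implementation.
 */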
static u64 vhash(unsigned char m[], unsigned int mbytes,
u64 *tagl, struct vmac_ctx *ctx)
{
u64 rh, rl, *mptr;
const u64 *kptr = (u64 *)ctx->nhkey;
int i, remaining;
u64 ch, cl;
u64 pkh = ctx->polykey[0];
u64 pkl = ctx->polykey[1];
mptr = (u64 *)m;
i = mbytes / VMAC_NHBYTES;
remaining = mbytes % VMAC_NHBYTES;
if (ctx->first_block_processed) {
ch = ctx->polytmp[0];
cl = ctx->polytmp[1];
} else if (i) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
i--;
} else if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
ch &= m62;
ADD128(ch, cl, pkh, pkl);
mptr += (VMAC_NHBYTES/sizeof(u64));
goto do_l3;
} else {/* Empty String */
ch = pkh; cl = pkl;
goto do_l3;
}
while (i--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
if (remaining) {
nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
}
do_l3:
vhash_abort(ctx);
remaining *= 8;
return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}
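/*
 * vmac: the tag is VHASH(m) plus a 64-bit pad taken from the block-cipher
 * encryption of the nonce. The encryption result for the most recent
 * nonce is kept in cached_nonce/cached_aes so that back-to-back requests
 * reusing it do not pay for another cipher call.
 */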
static u64 vmac(unsigned char m[], unsigned int mbytes,
unsigned char n[16], u64 *tagl,
struct vmac_ctx_t *ctx)
{
u64 *in_n, *out_p;
u64 p, h;
int i;
in_n = ctx->__vmac_ctx.cached_nonce;
out_p = ctx->__vmac_ctx.cached_aes;
i = n[15] & 1;
if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
in_n[0] = *(u64 *)(n);
in_n[1] = *(u64 *)(n+8);
((unsigned char *)in_n)[15] &= 0xFE;
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out_p, (unsigned char *)in_n);
((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
return p + h;
}
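/*
 * vmac_set_key: derive the NH, polynomial and L3 subkeys by encrypting an
 * incrementing counter block under the user key, with the distinct prefix
 * bytes 0x80, 0xC0 and 0xE0 separating the three key streams. L3 key
 * words are rejected and regenerated until they fall below p64.
 */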
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
u64 in[2] = {0}, out[2];
unsigned i;
int err = 0;
err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
if (err)
return err;
/* Fill nh key */
((unsigned char *)in)[0] = 0x80;
for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
}
/* Fill poly key */
((unsigned char *)in)[0] = 0xC0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.polytmp[i] =
ctx->__vmac_ctx.polykey[i] =
be64_to_cpup(out) & mpoly;
ctx->__vmac_ctx.polytmp[i+1] =
ctx->__vmac_ctx.polykey[i+1] =
be64_to_cpup(out+1) & mpoly;
((unsigned char *)in)[15] += 1;
}
/* Fill ip key */
((unsigned char *)in)[0] = 0xE0;
in[1] = 0;
for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
do {
crypto_cipher_encrypt_one(ctx->child,
(unsigned char *)out, (unsigned char *)in);
ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
((unsigned char *)in)[15] += 1;
} while (ctx->__vmac_ctx.l3key[i] >= p64
|| ctx->__vmac_ctx.l3key[i+1] >= p64);
}
/* Invalidate nonce/aes cache and reset other elements */
ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
ctx->__vmac_ctx.first_block_processed = 0;
return err;
}
static int vmac_setkey(struct crypto_shash *parent,
const u8 *key, unsigned int keylen)
{
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
if (keylen != VMAC_KEY_LEN) {
crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return vmac_set_key((u8 *)key, ctx);
}
static int vmac_init(struct shash_desc *pdesc)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
static int vmac_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
vhash_update(p, len, &ctx->__vmac_ctx);
return 0;
}
static int vmac_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
vmac_t mac;
u8 nonce[16] = {};
mac = vmac(NULL, 0, nonce, NULL, ctx);
memcpy(out, &mac, sizeof(vmac_t));
memset(&mac, 0, sizeof(vmac_t));
memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return PTR_ERR(alg);
inst = shash_alloc_instance("vmac", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
err = crypto_init_spawn(shash_instance_ctx(inst), alg,
shash_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto out_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.digestsize = sizeof(vmac_t);
inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
inst->alg.setkey = vmac_setkey;
err = shash_register_instance(tmpl, inst);
if (err) {
out_free_inst:
shash_free_instance(shash_crypto_instance(inst));
}
out_put_alg:
crypto_mod_put(alg);
return err;
}
static struct crypto_template vmac_tmpl = {
.name = "vmac",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
};
static int __init vmac_module_init(void)
{
return crypto_register_template(&vmac_tmpl);
}
static void __exit vmac_module_exit(void)
{
crypto_unregister_template(&vmac_tmpl);
}
module_init(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
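For orientation, here is a minimal sketch of how a kernel caller could consume the template registered above through the shash API once it is instantiated over AES ("vmac(aes)"). The function name, the AES-128 key length and the error handling are illustrative assumptions, not part of this patch:

/* Editor's illustrative sketch -- not part of vmac.c. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int vmac_example(const u8 key[16], const u8 *data,
			unsigned int len, u8 tag[8])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);	/* assumed instance name */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);	/* VMAC_KEY_LEN */
	if (err)
		goto out_free_tfm;

	/* kzalloc so any descriptor flags start out cleared */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	/* 64-bit tag; the (zero) nonce handling is internal to vmac_final() */
	err = crypto_shash_digest(desc, data, len, tag);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}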


@ -19,211 +19,142 @@
* Kazunori Miyazawa <miyazawa@linux-ipv6.org> * Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/ */
#include <crypto/scatterwalk.h> #include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x02020202,
0x03030303, 0x03030303, 0x03030303, 0x03030303}; 0x03030303, 0x03030303, 0x03030303, 0x03030303};
/* /*
* +------------------------ * +------------------------
* | <parent tfm> * | <parent tfm>
* +------------------------ * +------------------------
* | crypto_xcbc_ctx * | xcbc_tfm_ctx
* +------------------------
* | consts (block size * 2)
* +------------------------
*/
struct xcbc_tfm_ctx {
struct crypto_cipher *child;
u8 ctx[];
};
/*
* +------------------------
* | <shash desc>
* +------------------------
* | xcbc_desc_ctx
* +------------------------ * +------------------------
* | odds (block size) * | odds (block size)
* +------------------------ * +------------------------
* | prev (block size) * | prev (block size)
* +------------------------ * +------------------------
* | key (block size)
* +------------------------
* | consts (block size * 3)
* +------------------------
*/ */
struct crypto_xcbc_ctx { struct xcbc_desc_ctx {
struct crypto_cipher *child;
u8 *odds;
u8 *prev;
u8 *key;
u8 *consts;
void (*xor)(u8 *a, const u8 *b, unsigned int bs);
unsigned int keylen;
unsigned int len; unsigned int len;
u8 ctx[];
}; };
static void xor_128(u8 *a, const u8 *b, unsigned int bs) static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{ {
((u32 *)a)[0] ^= ((u32 *)b)[0]; unsigned long alignmask = crypto_shash_alignmask(parent);
((u32 *)a)[1] ^= ((u32 *)b)[1]; struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
((u32 *)a)[2] ^= ((u32 *)b)[2]; int bs = crypto_shash_blocksize(parent);
((u32 *)a)[3] ^= ((u32 *)b)[3]; u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
}
static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
struct crypto_xcbc_ctx *ctx)
{
int bs = crypto_hash_blocksize(parent);
int err = 0; int err = 0;
u8 key1[bs]; u8 key1[bs];
if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
return err; return err;
crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
return crypto_cipher_setkey(ctx->child, key1, bs); return crypto_cipher_setkey(ctx->child, key1, bs);
} }
static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
const u8 *inkey, unsigned int keylen)
{ {
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
if (keylen != crypto_cipher_blocksize(ctx->child)) int bs = crypto_shash_blocksize(pdesc->tfm);
return -EINVAL; u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
ctx->keylen = keylen;
memcpy(ctx->key, inkey, keylen);
ctx->consts = (u8*)ks;
return _crypto_xcbc_digest_setkey(parent, ctx);
}
static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
{
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
int bs = crypto_hash_blocksize(pdesc->tfm);
ctx->len = 0; ctx->len = 0;
memset(ctx->odds, 0, bs); memset(prev, 0, bs);
memset(ctx->prev, 0, bs);
return 0; return 0;
} }
static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
struct scatterlist *sg, unsigned int len)
unsigned int nbytes)
{ {
struct crypto_hash *parent = pdesc->tfm; struct crypto_shash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); unsigned long alignmask = crypto_shash_alignmask(parent);
struct crypto_cipher *tfm = ctx->child; struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
int bs = crypto_hash_blocksize(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
for (;;) { int bs = crypto_shash_blocksize(parent);
struct page *pg = sg_page(sg); u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
unsigned int offset = sg->offset; u8 *prev = odds + bs;
unsigned int slen = sg->length;
if (unlikely(slen > nbytes))
slen = nbytes;
nbytes -= slen;
while (slen > 0) {
unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
char *p = crypto_kmap(pg, 0) + offset;
/* checking the data can fill the block */ /* checking the data can fill the block */
if ((ctx->len + len) <= bs) { if ((ctx->len + len) <= bs) {
memcpy(ctx->odds + ctx->len, p, len); memcpy(odds + ctx->len, p, len);
ctx->len += len; ctx->len += len;
slen -= len; return 0;
/* checking the rest of the page */
if (len + offset >= PAGE_SIZE) {
offset = 0;
pg++;
} else
offset += len;
crypto_kunmap(p, 0);
crypto_yield(pdesc->flags);
continue;
} }
/* filling odds with new data and encrypting it */ /* filling odds with new data and encrypting it */
memcpy(ctx->odds + ctx->len, p, bs - ctx->len); memcpy(odds + ctx->len, p, bs - ctx->len);
len -= bs - ctx->len; len -= bs - ctx->len;
p += bs - ctx->len; p += bs - ctx->len;
ctx->xor(ctx->prev, ctx->odds, bs); crypto_xor(prev, odds, bs);
crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); crypto_cipher_encrypt_one(tfm, prev, prev);
/* clearing the length */ /* clearing the length */
ctx->len = 0; ctx->len = 0;
/* encrypting the rest of data */ /* encrypting the rest of data */
while (len > bs) { while (len > bs) {
ctx->xor(ctx->prev, p, bs); crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, ctx->prev, crypto_cipher_encrypt_one(tfm, prev, prev);
ctx->prev);
p += bs; p += bs;
len -= bs; len -= bs;
} }
/* keeping the surplus of blocksize */ /* keeping the surplus of blocksize */
if (len) { if (len) {
memcpy(ctx->odds, p, len); memcpy(odds, p, len);
ctx->len = len; ctx->len = len;
} }
crypto_kunmap(p, 0);
crypto_yield(pdesc->flags);
slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
offset = 0;
pg++;
}
if (!nbytes)
break;
sg = scatterwalk_sg_next(sg);
}
return 0; return 0;
} }
static int crypto_xcbc_digest_update(struct hash_desc *pdesc, static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
struct scatterlist *sg,
unsigned int nbytes)
{ {
if (WARN_ON_ONCE(in_irq())) struct crypto_shash *parent = pdesc->tfm;
return -EDEADLK; unsigned long alignmask = crypto_shash_alignmask(parent);
return crypto_xcbc_digest_update2(pdesc, sg, nbytes); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
} struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
u8 *prev = odds + bs;
unsigned int offset = 0;
static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) if (ctx->len != bs) {
{
struct crypto_hash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
struct crypto_cipher *tfm = ctx->child;
int bs = crypto_hash_blocksize(parent);
int err = 0;
if (ctx->len == bs) {
u8 key2[bs];
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
return err;
crypto_cipher_encrypt_one(tfm, key2,
(u8 *)(ctx->consts + bs));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key2, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
crypto_cipher_encrypt_one(tfm, out, ctx->prev);
} else {
u8 key3[bs];
unsigned int rlen; unsigned int rlen;
u8 *p = ctx->odds + ctx->len; u8 *p = odds + ctx->len;
*p = 0x80; *p = 0x80;
p++; p++;
@ -231,128 +162,111 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if (rlen) if (rlen)
memset(p, 0, rlen); memset(p, 0, rlen);
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) offset += bs;
return err;
crypto_cipher_encrypt_one(tfm, key3,
(u8 *)(ctx->consts + bs * 2));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key3, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
crypto_cipher_encrypt_one(tfm, out, ctx->prev);
} }
crypto_xor(prev, odds, bs);
crypto_xor(prev, consts + offset, bs);
crypto_cipher_encrypt_one(tfm, out, prev);
return 0; return 0;
} }
static int crypto_xcbc_digest(struct hash_desc *pdesc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
crypto_xcbc_digest_init(pdesc);
crypto_xcbc_digest_update2(pdesc, sg, nbytes);
return crypto_xcbc_digest_final(pdesc, out);
}
static int xcbc_init_tfm(struct crypto_tfm *tfm) static int xcbc_init_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_cipher *cipher; struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
cipher = crypto_spawn_cipher(spawn); cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher)) if (IS_ERR(cipher))
return PTR_ERR(cipher); return PTR_ERR(cipher);
switch(bs) {
case 16:
ctx->xor = xor_128;
break;
default:
return -EINVAL;
}
ctx->child = cipher; ctx->child = cipher;
ctx->odds = (u8*)(ctx+1);
ctx->prev = ctx->odds + bs;
ctx->key = ctx->prev + bs;
return 0; return 0;
}; };
static void xcbc_exit_tfm(struct crypto_tfm *tfm) static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{ {
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child); crypto_free_cipher(ctx->child);
} }
static struct crypto_instance *xcbc_alloc(struct rtattr **tb) static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{ {
struct crypto_instance *inst; struct shash_instance *inst;
struct crypto_alg *alg; struct crypto_alg *alg;
unsigned long alignmask;
int err; int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err) if (err)
return ERR_PTR(err); return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK); CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg)) if (IS_ERR(alg))
return ERR_CAST(alg); return PTR_ERR(alg);
switch(alg->cra_blocksize) { switch(alg->cra_blocksize) {
case 16: case 16:
break; break;
default: default:
inst = ERR_PTR(-EINVAL);
goto out_put_alg; goto out_put_alg;
} }
inst = crypto_alloc_instance("xcbc", alg); inst = shash_alloc_instance("xcbc", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst)) if (IS_ERR(inst))
goto out_put_alg; goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; err = crypto_init_spawn(shash_instance_ctx(inst), alg,
inst->alg.cra_priority = alg->cra_priority; shash_crypto_instance(inst),
inst->alg.cra_blocksize = alg->cra_blocksize; CRYPTO_ALG_TYPE_MASK);
inst->alg.cra_alignmask = alg->cra_alignmask; if (err)
inst->alg.cra_type = &crypto_hash_type; goto out_free_inst;
inst->alg.cra_hash.digestsize = alg->cra_blocksize; alignmask = alg->cra_alignmask | 3;
inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + inst->alg.base.cra_alignmask = alignmask;
ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.cra_init = xcbc_init_tfm; inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_exit = xcbc_exit_tfm;
inst->alg.cra_hash.init = crypto_xcbc_digest_init; inst->alg.digestsize = alg->cra_blocksize;
inst->alg.cra_hash.update = crypto_xcbc_digest_update; inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
inst->alg.cra_hash.final = crypto_xcbc_digest_final; crypto_tfm_ctx_alignment()) +
inst->alg.cra_hash.digest = crypto_xcbc_digest; (alignmask &
inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; ~(crypto_tfm_ctx_alignment() - 1)) +
alg->cra_blocksize * 2;
inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
alignmask + 1) +
alg->cra_blocksize * 2;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
inst->alg.final = crypto_xcbc_digest_final;
inst->alg.setkey = crypto_xcbc_digest_setkey;
err = shash_register_instance(tmpl, inst);
if (err) {
out_free_inst:
shash_free_instance(shash_crypto_instance(inst));
}
out_put_alg: out_put_alg:
crypto_mod_put(alg); crypto_mod_put(alg);
return inst; return err;
}
static void xcbc_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
} }
static struct crypto_template crypto_xcbc_tmpl = { static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc", .name = "xcbc",
.alloc = xcbc_alloc, .create = xcbc_create,
.free = xcbc_free, .free = shash_free_instance,
.module = THIS_MODULE, .module = THIS_MODULE,
}; };


@@ -44,8 +44,8 @@
  * want to register another driver on the same PCI id.
  */
 static const struct pci_device_id pci_tbl[] = {
-	{ 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
-	{ 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+	{ PCI_VDEVICE(AMD, 0x7443), 0, },
+	{ PCI_VDEVICE(AMD, 0x746b), 0, },
 	{ 0, },	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);


@@ -46,8 +46,7 @@
  * want to register another driver on the same PCI id.
  */
 static const struct pci_device_id pci_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
 	{ 0, },	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);


@@ -240,6 +240,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
+#include <linux/fips.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;
+	__u8 *last_data;
 };
 
 static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
+	unsigned long flags;
 
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, min, reserved);
 
 	while (nbytes) {
 		extract_buf(r, tmp);
+
+		if (r->last_data) {
+			spin_lock_irqsave(&r->lock, flags);
+			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+				panic("Hardware RNG duplicated output!\n");
+			memcpy(r->last_data, tmp, EXTRACT_SIZE);
+			spin_unlock_irqrestore(&r->lock, flags);
+		}
 		i = min_t(int, nbytes, EXTRACT_SIZE);
 		memcpy(buf, tmp, i);
 		nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
 	now = ktime_get_real();
 	mix_pool_bytes(r, &now, sizeof(now));
 	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+	/* Enable continuous test in fips mode */
+	if (fips_enabled)
+		r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
 }
 
 static int rand_initialize(void)


@@ -13,7 +13,6 @@ if CRYPTO_HW
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
 	depends on X86 && !UML
-	select CRYPTO_ALGAPI
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
 config CRYPTO_DEV_PADLOCK_SHA
 	tristate "PadLock driver for SHA1 and SHA256 algorithms"
 	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_HASH
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help
@@ -157,6 +157,19 @@ config S390_PRNG
 	  ANSI X9.17 standard. The PRNG is usable via the char device
 	  /dev/prandom.
 
+config CRYPTO_DEV_MV_CESA
+	tristate "Marvell's Cryptographic Engine"
+	depends on PLAT_ORION
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER2
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on the Marvell Orion
+	  and Kirkwood SoCs, such as QNAP's TS-209.
+
+	  Currently the driver supports AES in ECB and CBC mode without DMA.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES


@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/


@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
 		}
 	}
 
-	tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct crypto4xx_ctx));
 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
 	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
 				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,


@ -31,8 +31,6 @@
#include <asm/dcr.h> #include <asm/dcr.h>
#include <asm/dcr-regs.h> #include <asm/dcr-regs.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <crypto/internal/hash.h>
#include <crypto/algapi.h>
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/sha.h> #include <crypto/sha.h>
#include "crypto4xx_reg_def.h" #include "crypto4xx_reg_def.h"
@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_out_dma_addr = 0; ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0; ctx->sa_len = 0;
if (alg->cra_type == &crypto_ablkcipher_type) switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
else if (alg->cra_type == &crypto_ahash_type) break;
tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); case CRYPTO_ALG_TYPE_AHASH:
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
break;
}
return 0; return 0;
} }
@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
} }
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
struct crypto_alg *crypto_alg, int array_size) struct crypto4xx_alg_common *crypto_alg,
int array_size)
{ {
struct crypto4xx_alg *alg; struct crypto4xx_alg *alg;
int i; int i;
@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
return -ENOMEM; return -ENOMEM;
alg->alg = crypto_alg[i]; alg->alg = crypto_alg[i];
INIT_LIST_HEAD(&alg->alg.cra_list);
if (alg->alg.cra_init == NULL)
alg->alg.cra_init = crypto4xx_alg_init;
if (alg->alg.cra_exit == NULL)
alg->alg.cra_exit = crypto4xx_alg_exit;
alg->dev = sec_dev; alg->dev = sec_dev;
rc = crypto_register_alg(&alg->alg);
switch (alg->alg.type) {
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
default:
rc = crypto_register_alg(&alg->alg.u.cipher);
break;
}
if (rc) { if (rc) {
list_del(&alg->entry); list_del(&alg->entry);
kfree(alg); kfree(alg);
@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry); list_del(&alg->entry);
crypto_unregister_alg(&alg->alg); switch (alg->alg.type) {
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&alg->alg.u.hash);
break;
default:
crypto_unregister_alg(&alg->alg.u.cipher);
}
kfree(alg); kfree(alg);
} }
} }
@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/** /**
* Supported Crypto Algorithms * Supported Crypto Algorithms
*/ */
struct crypto_alg crypto4xx_alg[] = { struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */ /* Crypto AES modes */
{ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)", .cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx", .cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx), .cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type, .cra_type = &crypto_ablkcipher_type,
.cra_init = crypto4xx_alg_init,
.cra_exit = crypto4xx_alg_exit,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_u = { .cra_u = {
.ablkcipher = { .ablkcipher = {
@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
.decrypt = crypto4xx_decrypt, .decrypt = crypto4xx_decrypt,
} }
} }
}, }},
/* Hash SHA1 */
{
.cra_name = "sha1",
.cra_driver_name = "sha1-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ahash_type,
.cra_init = crypto4xx_sha1_alg_init,
.cra_module = THIS_MODULE,
.cra_u = {
.ahash = {
.digestsize = SHA1_DIGEST_SIZE,
.init = crypto4xx_hash_init,
.update = crypto4xx_hash_update,
.final = crypto4xx_hash_final,
.digest = crypto4xx_hash_digest,
}
}
},
}; };
/** /**


@@ -22,6 +22,8 @@
 #ifndef __CRYPTO4XX_CORE_H__
 #define __CRYPTO4XX_CORE_H__
 
+#include <crypto/internal/hash.h>
+
 #define PPC460SX_SDR0_SRST			0x201
 #define PPC405EX_SDR0_SRST			0x200
 #define PPC460EX_SDR0_SRST			0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
 	u16 sa_len;
 };
 
+struct crypto4xx_alg_common {
+	u32 type;
+	union {
+		struct crypto_alg cipher;
+		struct ahash_alg hash;
+	} u;
+};
+
 struct crypto4xx_alg {
 	struct list_head entry;
-	struct crypto_alg alg;
+	struct crypto4xx_alg_common alg;
 	struct crypto4xx_device *dev;
 };
 
-#define crypto_alg_to_crypto4xx_alg(x) \
-		container_of(x, struct crypto4xx_alg, alg)
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+	struct crypto_alg *x)
+{
+	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		return container_of(__crypto_ahash_alg(x),
+				    struct crypto4xx_alg, alg.u.hash);
+	}
+
+	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+}
 
 extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
 extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);

drivers/crypto/mv_cesa.c (new file)

@@ -0,0 +1,606 @@
/*
* Support for Marvell's crypto engine which can be found on some Orion5X
* boards.
*
* Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
* License: GPLv2
*
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include "mv_cesa.h"
/*
* STM:
* /---------------------------------------\
* | | request complete
* \./ |
* IDLE -> new request -> BUSY -> done -> DEQUEUE
* /°\ |
* | | more scatter entries
* \________________/
*/
enum engine_status {
ENGINE_IDLE,
ENGINE_BUSY,
ENGINE_W_DEQUEUE,
};
/**
* struct req_progress - used for every crypt request
* @src_sg_it: sg iterator for src
* @dst_sg_it: sg iterator for dst
* @sg_src_left: bytes left in src to process (scatter list)
* @src_start: offset to add to src start position (scatter list)
* @crypt_len: length of current crypt process
* @sg_dst_left: bytes left dst to process in this scatter list
* @dst_start: offset to add to dst start position (scatter list)
* @total_req_bytes: total number of bytes processed (request).
*
* sg helper are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
* track of progress within current scatterlist.
*/
struct req_progress {
struct sg_mapping_iter src_sg_it;
struct sg_mapping_iter dst_sg_it;
/* src mostly */
int sg_src_left;
int src_start;
int crypt_len;
/* dst mostly */
int sg_dst_left;
int dst_start;
int total_req_bytes;
};
struct crypto_priv {
void __iomem *reg;
void __iomem *sram;
int irq;
struct task_struct *queue_th;
/* the lock protects queue and eng_st */
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
struct ablkcipher_request *cur_req;
struct req_progress p;
int max_req_size;
int sram_size;
};
static struct crypto_priv *cpg;
struct mv_ctx {
u8 aes_enc_key[AES_KEY_LEN];
u32 aes_dec_key[8];
int key_len;
u32 need_calc_aes_dkey;
};
enum crypto_op {
COP_AES_ECB,
COP_AES_CBC,
};
struct mv_req_ctx {
enum crypto_op op;
int decrypt;
};
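/*
 * The engine wants the AES decryption key, i.e. the last round keys of the
 * expanded schedule. Derive them lazily from the stored encryption key the
 * first time a decrypt request arrives after a setkey.
 */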
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
int key_pos;
if (!ctx->need_calc_aes_dkey)
return;
crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
key_pos = ctx->key_len + 24;
memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
switch (ctx->key_len) {
case AES_KEYSIZE_256:
key_pos -= 2;
/* fall through */
case AES_KEYSIZE_192:
key_pos -= 2;
memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
4 * 4);
break;
}
ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
switch (len) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_192:
case AES_KEYSIZE_256:
break;
default:
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->key_len = len;
ctx->need_calc_aes_dkey = 1;
memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
return 0;
}
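/*
 * Copy the next chunk of the source scatterlist into the SRAM input area.
 * At most max_req_size bytes are taken per pass; the remainder of the
 * current scatterlist entry is tracked in sg_src_left/src_start.
 */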
static void setup_data_in(struct ablkcipher_request *req)
{
int ret;
void *buf;
if (!cpg->p.sg_src_left) {
ret = sg_miter_next(&cpg->p.src_sg_it);
BUG_ON(!ret);
cpg->p.sg_src_left = cpg->p.src_sg_it.length;
cpg->p.src_start = 0;
}
cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
buf = cpg->p.src_sg_it.addr;
buf += cpg->p.src_start;
memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
cpg->p.sg_src_left -= cpg->p.crypt_len;
cpg->p.src_start += cpg->p.crypt_len;
}
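/*
 * Build the accelerator descriptor for the current request in SRAM
 * (config word, key, and the IV for the first block), load the next chunk
 * of input data and kick off the engine.
 */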
static void mv_process_current_q(int first_block)
{
struct ablkcipher_request *req = cpg->cur_req;
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct sec_accel_config op;
switch (req_ctx->op) {
case COP_AES_ECB:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
break;
case COP_AES_CBC:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
if (first_block)
memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
break;
}
if (req_ctx->decrypt) {
op.config |= CFG_DIR_DEC;
memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
AES_KEY_LEN);
} else {
op.config |= CFG_DIR_ENC;
memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
AES_KEY_LEN);
}
switch (ctx->key_len) {
case AES_KEYSIZE_128:
op.config |= CFG_AES_LEN_128;
break;
case AES_KEYSIZE_192:
op.config |= CFG_AES_LEN_192;
break;
case AES_KEYSIZE_256:
op.config |= CFG_AES_LEN_256;
break;
}
op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
ENC_P_DST(SRAM_DATA_OUT_START);
op.enc_key_p = SRAM_DATA_KEY_P;
setup_data_in(req);
op.enc_len = cpg->p.crypt_len;
memcpy(cpg->sram + SRAM_CONFIG, &op,
sizeof(struct sec_accel_config));
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
/* GO */
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
/*
* XXX: add timer if the interrupt does not occur for some mystery
* reason
*/
}
static void mv_crypto_algo_completion(void)
{
struct ablkcipher_request *req = cpg->cur_req;
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
if (req_ctx->op != COP_AES_CBC)
return ;
memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
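/*
 * Called once the engine has signalled completion: copy the processed
 * chunk from SRAM back into the destination scatterlist, then either start
 * the next chunk or complete the request.
 */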
static void dequeue_complete_req(void)
{
struct ablkcipher_request *req = cpg->cur_req;
void *buf;
int ret;
cpg->p.total_req_bytes += cpg->p.crypt_len;
do {
int dst_copy;
if (!cpg->p.sg_dst_left) {
ret = sg_miter_next(&cpg->p.dst_sg_it);
BUG_ON(!ret);
cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
cpg->p.dst_start = 0;
}
buf = cpg->p.dst_sg_it.addr;
buf += cpg->p.dst_start;
dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
cpg->p.sg_dst_left -= dst_copy;
cpg->p.crypt_len -= dst_copy;
cpg->p.dst_start += dst_copy;
} while (cpg->p.crypt_len > 0);
BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
if (cpg->p.total_req_bytes < req->nbytes) {
/* process next scatter list entry */
cpg->eng_st = ENGINE_BUSY;
mv_process_current_q(0);
} else {
sg_miter_stop(&cpg->p.src_sg_it);
sg_miter_stop(&cpg->p.dst_sg_it);
mv_crypto_algo_completion();
cpg->eng_st = ENGINE_IDLE;
req->base.complete(&req->base, 0);
}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
int i = 0;
do {
total_bytes -= sl[i].length;
i++;
} while (total_bytes > 0);
return i;
}
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
int num_sgs;
cpg->cur_req = req;
memset(&cpg->p, 0, sizeof(struct req_progress));
num_sgs = count_sgs(req->src, req->nbytes);
sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
num_sgs = count_sgs(req->dst, req->nbytes);
sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
mv_process_current_q(1);
}
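/*
 * Kernel thread feeding the engine: dequeue finished work, then pull the
 * next request off the crypto queue whenever the engine is idle.
 */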
static int queue_manag(void *data)
{
cpg->eng_st = ENGINE_IDLE;
do {
struct ablkcipher_request *req;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog = NULL;
__set_current_state(TASK_INTERRUPTIBLE);
if (cpg->eng_st == ENGINE_W_DEQUEUE)
dequeue_complete_req();
spin_lock_irq(&cpg->lock);
if (cpg->eng_st == ENGINE_IDLE) {
backlog = crypto_get_backlog(&cpg->queue);
async_req = crypto_dequeue_request(&cpg->queue);
if (async_req) {
BUG_ON(cpg->eng_st != ENGINE_IDLE);
cpg->eng_st = ENGINE_BUSY;
}
}
spin_unlock_irq(&cpg->lock);
if (backlog) {
backlog->complete(backlog, -EINPROGRESS);
backlog = NULL;
}
if (async_req) {
req = container_of(async_req,
struct ablkcipher_request, base);
mv_enqueue_new_req(req);
async_req = NULL;
}
schedule();
} while (!kthread_should_stop());
return 0;
}
static int mv_handle_req(struct ablkcipher_request *req)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cpg->lock, flags);
ret = ablkcipher_enqueue_request(&cpg->queue, req);
spin_unlock_irqrestore(&cpg->lock, flags);
wake_up_process(cpg->queue_th);
return ret;
}
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 0;
return mv_handle_req(req);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
return mv_handle_req(req);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 0;
return mv_handle_req(req);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
return mv_handle_req(req);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
return 0;
}
irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
if (!(val & SEC_INT_ACCEL0_DONE))
return IRQ_NONE;
val &= ~SEC_INT_ACCEL0_DONE;
writel(val, cpg->reg + FPGA_INT_STATUS);
writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
BUG_ON(cpg->eng_st != ENGINE_BUSY);
cpg->eng_st = ENGINE_W_DEQUEUE;
wake_up_process(cpg->queue_th);
return IRQ_HANDLED;
}
struct crypto_alg mv_aes_alg_ecb = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 16,
.cra_ctxsize = sizeof(struct mv_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cra_init,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = mv_setkey_aes,
.encrypt = mv_enc_aes_ecb,
.decrypt = mv_dec_aes_ecb,
},
},
};
struct crypto_alg mv_aes_alg_cbc = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mv_cra_init,
.cra_u = {
.ablkcipher = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = mv_setkey_aes,
.encrypt = mv_enc_aes_cbc,
.decrypt = mv_dec_aes_cbc,
},
},
};
static int mv_probe(struct platform_device *pdev)
{
struct crypto_priv *cp;
struct resource *res;
int irq;
int ret;
if (cpg) {
printk(KERN_ERR "Second crypto dev?\n");
return -EEXIST;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!res)
return -ENXIO;
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
spin_lock_init(&cp->lock);
crypto_init_queue(&cp->queue, 50);
cp->reg = ioremap(res->start, res->end - res->start + 1);
if (!cp->reg) {
ret = -ENOMEM;
goto err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
if (!res) {
ret = -ENXIO;
goto err_unmap_reg;
}
cp->sram_size = res->end - res->start + 1;
cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
cp->sram = ioremap(res->start, cp->sram_size);
if (!cp->sram) {
ret = -ENOMEM;
goto err_unmap_reg;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0 || irq == NO_IRQ) {
ret = irq;
goto err_unmap_sram;
}
cp->irq = irq;
platform_set_drvdata(pdev, cp);
cpg = cp;
cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
if (IS_ERR(cp->queue_th)) {
ret = PTR_ERR(cp->queue_th);
goto err_thread;
}
ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
cp);
if (ret)
goto err_unmap_sram;
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
ret = crypto_register_alg(&mv_aes_alg_ecb);
if (ret)
goto err_reg;
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
goto err_unreg_ecb;
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
err_thread:
free_irq(irq, cp);
err_reg:
kthread_stop(cp->queue_th);
err_unmap_sram:
iounmap(cp->sram);
err_unmap_reg:
iounmap(cp->reg);
err:
kfree(cp);
cpg = NULL;
platform_set_drvdata(pdev, NULL);
return ret;
}
static int mv_remove(struct platform_device *pdev)
{
struct crypto_priv *cp = platform_get_drvdata(pdev);
crypto_unregister_alg(&mv_aes_alg_ecb);
crypto_unregister_alg(&mv_aes_alg_cbc);
kthread_stop(cp->queue_th);
free_irq(cp->irq, cp);
memset(cp->sram, 0, cp->sram_size);
iounmap(cp->sram);
iounmap(cp->reg);
kfree(cp);
cpg = NULL;
return 0;
}
static struct platform_driver marvell_crypto = {
.probe = mv_probe,
.remove = mv_remove,
.driver = {
.owner = THIS_MODULE,
.name = "mv_crypto",
},
};
MODULE_ALIAS("platform:mv_crypto");
static int __init mv_crypto_init(void)
{
return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);
static void __exit mv_crypto_exit(void)
{
platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");

drivers/crypto/mv_cesa.h (new file)

@@ -0,0 +1,119 @@
#ifndef __MV_CRYPTO_H__
#define __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
#define SEC_CMD_DISABLE_SEC (1 << 2)
#define SEC_ACCEL_DESC_P0 0xde04
#define SEC_DESC_P0_PTR(x) (x)
#define SEC_ACCEL_DESC_P1 0xde14
#define SEC_DESC_P1_PTR(x) (x)
#define SEC_ACCEL_CFG 0xde08
#define SEC_CFG_STOP_DIG_ERR (1 << 0)
#define SEC_CFG_CH0_W_IDMA (1 << 7)
#define SEC_CFG_CH1_W_IDMA (1 << 8)
#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
#define SEC_ACCEL_STATUS 0xde0c
#define SEC_ST_ACT_0 (1 << 0)
#define SEC_ST_ACT_1 (1 << 1)
/*
 * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata
 * 4.12. It was probably part of an IRQ controller in the FPGA that someone
 * forgot to remove while switching to the core and moving to
* SEC_ACCEL_INT_STATUS.
*/
#define FPGA_INT_STATUS 0xdd68
#define SEC_ACCEL_INT_STATUS 0xde20
#define SEC_INT_AUTH_DONE (1 << 0)
#define SEC_INT_DES_E_DONE (1 << 1)
#define SEC_INT_AES_E_DONE (1 << 2)
#define SEC_INT_AES_D_DONE (1 << 3)
#define SEC_INT_ENC_DONE (1 << 4)
#define SEC_INT_ACCEL0_DONE (1 << 5)
#define SEC_INT_ACCEL1_DONE (1 << 6)
#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
#define SEC_ACCEL_INT_MASK 0xde24
#define AES_KEY_LEN (8 * 4)
struct sec_accel_config {
u32 config;
#define CFG_OP_MAC_ONLY 0
#define CFG_OP_CRYPT_ONLY 1
#define CFG_OP_MAC_CRYPT 2
#define CFG_OP_CRYPT_MAC 3
#define CFG_MACM_MD5 (4 << 4)
#define CFG_MACM_SHA1 (5 << 4)
#define CFG_MACM_HMAC_MD5 (6 << 4)
#define CFG_MACM_HMAC_SHA1 (7 << 4)
#define CFG_ENCM_DES (1 << 8)
#define CFG_ENCM_3DES (2 << 8)
#define CFG_ENCM_AES (3 << 8)
#define CFG_DIR_ENC (0 << 12)
#define CFG_DIR_DEC (1 << 12)
#define CFG_ENC_MODE_ECB (0 << 16)
#define CFG_ENC_MODE_CBC (1 << 16)
#define CFG_3DES_EEE (0 << 20)
#define CFG_3DES_EDE (1 << 20)
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
u32 enc_p;
#define ENC_P_SRC(x) (x)
#define ENC_P_DST(x) ((x) << 16)
u32 enc_len;
#define ENC_LEN(x) (x)
u32 enc_key_p;
#define ENC_KEY_P(x) (x)
u32 enc_iv;
#define ENC_IV_POINT(x) ((x) << 0)
#define ENC_IV_BUF_POINT(x) ((x) << 16)
u32 mac_src_p;
#define MAC_SRC_DATA_P(x) (x)
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
u32 mac_iv;
}__attribute__ ((packed));
/*
* /-----------\ 0
* | ACCEL CFG | 4 * 8
* |-----------| 0x20
* | CRYPT KEY | 8 * 4
* |-----------| 0x40
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
* |-----------| 0x50
* | DATA IN | 16 * x (max ->max_req_size)
* |-----------| 0x50 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
#define SRAM_DATA_IN_START 0x50
#define SRAM_DATA_OUT_START 0x50
#define SRAM_CFG_SPACE 0x50
#endif


@ -12,81 +12,43 @@
* *
*/ */
#include <crypto/algapi.h> #include <crypto/internal/hash.h>
#include <crypto/sha.h> #include <crypto/sha.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <asm/i387.h> #include <asm/i387.h>
#include "padlock.h" #include "padlock.h"
#define SHA1_DEFAULT_FALLBACK "sha1-generic" struct padlock_sha_desc {
#define SHA256_DEFAULT_FALLBACK "sha256-generic" struct shash_desc fallback;
struct padlock_sha_ctx {
char *data;
size_t used;
int bypass;
void (*f_sha_padlock)(const char *in, char *out, int count);
struct hash_desc fallback;
}; };
static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) struct padlock_sha_ctx {
struct crypto_shash *fallback;
};
static int padlock_sha_init(struct shash_desc *desc)
{ {
return crypto_tfm_ctx(tfm); struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_init(&dctx->fallback);
} }
/* We'll need aligned address on the stack */ static int padlock_sha_update(struct shash_desc *desc,
#define NEAREST_ALIGNED(ptr) \ const u8 *data, unsigned int length)
((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
static struct crypto_alg sha1_alg, sha256_alg;
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{ {
if (ctx(tfm)->bypass) struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
return;
crypto_hash_init(&ctx(tfm)->fallback); dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
if (ctx(tfm)->data && ctx(tfm)->used) { return crypto_shash_update(&dctx->fallback, data, length);
struct scatterlist sg;
sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
}
ctx(tfm)->used = 0;
ctx(tfm)->bypass = 1;
}
static void padlock_sha_init(struct crypto_tfm *tfm)
{
ctx(tfm)->used = 0;
ctx(tfm)->bypass = 0;
}
static void padlock_sha_update(struct crypto_tfm *tfm,
const uint8_t *data, unsigned int length)
{
/* Our buffer is always one page. */
if (unlikely(!ctx(tfm)->bypass &&
(ctx(tfm)->used + length > PAGE_SIZE)))
padlock_sha_bypass(tfm);
if (unlikely(ctx(tfm)->bypass)) {
struct scatterlist sg;
sg_init_one(&sg, (uint8_t *)data, length);
crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
return;
}
memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
ctx(tfm)->used += length;
} }
static inline void padlock_output_block(uint32_t *src, static inline void padlock_output_block(uint32_t *src,
@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++); *dst++ = swab32(*src++);
} }
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+			      unsigned int count, u8 *out)
 {
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char buf[128+16];
-	char *result = NEAREST_ALIGNED(buf);
+	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+	struct sha1_state state;
+	unsigned int space;
+	unsigned int leftover;
 	int ts_state;
+	int err;
 
-	((uint32_t *)result)[0] = SHA1_H0;
-	((uint32_t *)result)[1] = SHA1_H1;
-	((uint32_t *)result)[2] = SHA1_H2;
-	((uint32_t *)result)[3] = SHA1_H3;
-	((uint32_t *)result)[4] = SHA1_H4;
+	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_shash_export(&dctx->fallback, &state);
+	if (err)
+		goto out;
+
+	if (state.count + count > ULONG_MAX)
+		return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+	space = SHA1_BLOCK_SIZE - leftover;
+	if (space) {
+		if (count > space) {
+			err = crypto_shash_update(&dctx->fallback, in, space) ?:
+			      crypto_shash_export(&dctx->fallback, &state);
+			if (err)
+				goto out;
+			count -= space;
+			in += space;
+		} else {
+			memcpy(state.buffer + leftover, in, count);
+			in = state.buffer;
+			count += leftover;
+			state.count &= ~(SHA1_BLOCK_SIZE - 1);
+		}
+	}
+
+	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
 	/* prevent taking the spurious DNA fault with padlock. */
 	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
-		      : "+S"(in), "+D"(result)
-		      : "c"(count), "a"(0));
+		      : \
+		      : "c"((unsigned long)state.count + count), \
+			"a"((unsigned long)state.count), \
+			"S"(in), "D"(result));
 	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+	return err;
 }
static void padlock_do_sha256(const char *in, char *out, int count) static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
u8 buf[4];
return padlock_sha1_finup(desc, buf, 0, out);
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
unsigned int count, u8 *out)
{ {
/* We can't store directly to *out as it may be unaligned. */ /* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes! /* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */ * PadLock microcode needs it that big. */
char buf[128+16]; char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
char *result = NEAREST_ALIGNED(buf); struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
struct sha256_state state;
unsigned int space;
unsigned int leftover;
int ts_state; int ts_state;
int err;
((uint32_t *)result)[0] = SHA256_H0; dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
((uint32_t *)result)[1] = SHA256_H1; err = crypto_shash_export(&dctx->fallback, &state);
((uint32_t *)result)[2] = SHA256_H2; if (err)
((uint32_t *)result)[3] = SHA256_H3; goto out;
((uint32_t *)result)[4] = SHA256_H4;
((uint32_t *)result)[5] = SHA256_H5; if (state.count + count > ULONG_MAX)
((uint32_t *)result)[6] = SHA256_H6; return crypto_shash_finup(&dctx->fallback, in, count, out);
((uint32_t *)result)[7] = SHA256_H7;
leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
space = SHA256_BLOCK_SIZE - leftover;
if (space) {
if (count > space) {
err = crypto_shash_update(&dctx->fallback, in, space) ?:
crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
count -= space;
in += space;
} else {
memcpy(state.buf + leftover, in, count);
in = state.buf;
count += leftover;
state.count &= ~(SHA1_BLOCK_SIZE - 1);
}
}
memcpy(result, &state.state, SHA256_DIGEST_SIZE);
/* prevent taking the spurious DNA fault with padlock. */ /* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save(); ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: "+S"(in), "+D"(result) : \
: "c"(count), "a"(0)); : "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
irq_ts_restore(ts_state); irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
out:
return err;
} }
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{ {
if (unlikely(ctx(tfm)->bypass)) { u8 buf[4];
crypto_hash_final(&ctx(tfm)->fallback, out);
ctx(tfm)->bypass = 0;
return;
}
/* Pass the input buffer to PadLock microcode... */ return padlock_sha256_finup(desc, buf, 0, out);
ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
ctx(tfm)->used = 0;
} }
static int padlock_cra_init(struct crypto_tfm *tfm) static int padlock_cra_init(struct crypto_tfm *tfm)
{ {
struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = tfm->__crt_alg->cra_name; const char *fallback_driver_name = tfm->__crt_alg->cra_name;
struct crypto_hash *fallback_tfm; struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *fallback_tfm;
/* For now we'll allocate one page. This int err = -ENOMEM;
* could eventually be configurable one day. */
ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
if (!ctx(tfm)->data)
return -ENOMEM;
/* Allocate a fallback and abort if it failed. */ /* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) { if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name); fallback_driver_name);
free_page((unsigned long)(ctx(tfm)->data)); err = PTR_ERR(fallback_tfm);
return PTR_ERR(fallback_tfm); goto out;
} }
ctx(tfm)->fallback.tfm = fallback_tfm; ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0; return 0;
}
static int padlock_sha1_cra_init(struct crypto_tfm *tfm) out:
{ return err;
ctx(tfm)->f_sha_padlock = padlock_do_sha1;
return padlock_cra_init(tfm);
}
static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
ctx(tfm)->f_sha_padlock = padlock_do_sha256;
return padlock_cra_init(tfm);
} }
static void padlock_cra_exit(struct crypto_tfm *tfm) static void padlock_cra_exit(struct crypto_tfm *tfm)
{ {
if (ctx(tfm)->data) { struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
free_page((unsigned long)(ctx(tfm)->data));
ctx(tfm)->data = NULL; crypto_free_shash(ctx->fallback);
} }
crypto_free_hash(ctx(tfm)->fallback.tfm); static struct shash_alg sha1_alg = {
ctx(tfm)->fallback.tfm = NULL; .digestsize = SHA1_DIGEST_SIZE,
} .init = padlock_sha_init,
.update = padlock_sha_update,
static struct crypto_alg sha1_alg = { .finup = padlock_sha1_finup,
.final = padlock_sha1_final,
.descsize = sizeof(struct padlock_sha_desc),
.base = {
.cra_name = "sha1", .cra_name = "sha1",
.cra_driver_name = "sha1-padlock", .cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY, .cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST | .cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE, .cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), .cra_init = padlock_cra_init,
.cra_init = padlock_sha1_cra_init,
.cra_exit = padlock_cra_exit, .cra_exit = padlock_cra_exit,
.cra_u = {
.digest = {
.dia_digestsize = SHA1_DIGEST_SIZE,
.dia_init = padlock_sha_init,
.dia_update = padlock_sha_update,
.dia_final = padlock_sha_final,
}
} }
}; };
static struct crypto_alg sha256_alg = { static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = padlock_sha_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
.final = padlock_sha256_final,
.descsize = sizeof(struct padlock_sha_desc),
.base = {
.cra_name = "sha256", .cra_name = "sha256",
.cra_driver_name = "sha256-padlock", .cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY, .cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST | .cra_flags = CRYPTO_ALG_TYPE_SHASH |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE, .cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), .cra_init = padlock_cra_init,
.cra_init = padlock_sha256_cra_init,
.cra_exit = padlock_cra_exit, .cra_exit = padlock_cra_exit,
.cra_u = {
.digest = {
.dia_digestsize = SHA256_DIGEST_SIZE,
.dia_init = padlock_sha_init,
.dia_update = padlock_sha_update,
.dia_final = padlock_sha_final,
}
} }
}; };
@ -272,11 +275,11 @@ static int __init padlock_init(void)
return -ENODEV; return -ENODEV;
} }
rc = crypto_register_alg(&sha1_alg); rc = crypto_register_shash(&sha1_alg);
if (rc) if (rc)
goto out; goto out;
rc = crypto_register_alg(&sha256_alg); rc = crypto_register_shash(&sha256_alg);
if (rc) if (rc)
goto out_unreg1; goto out_unreg1;
@ -285,7 +288,7 @@ static int __init padlock_init(void)
return 0; return 0;
out_unreg1: out_unreg1:
crypto_unregister_alg(&sha1_alg); crypto_unregister_shash(&sha1_alg);
out: out:
printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
return rc; return rc;
@ -293,8 +296,8 @@ out:
static void __exit padlock_fini(void) static void __exit padlock_fini(void)
{ {
crypto_unregister_alg(&sha1_alg); crypto_unregister_shash(&sha1_alg);
crypto_unregister_alg(&sha256_alg); crypto_unregister_shash(&sha256_alg);
} }
module_init(padlock_init); module_init(padlock_init);
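For reference, a minimal sketch (not part of this commit) of how a kernel-side caller would exercise the converted driver through the synchronous shash interface; the algorithm name "sha1", the helper name and the error handling are illustrative only.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical caller: one-shot SHA-1 over a linear buffer. If the VIA
 * PadLock driver above is registered with the highest priority, the
 * "sha1" lookup resolves to sha1-padlock; otherwise a generic
 * implementation is used transparently. */
static int demo_sha1_digest(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The descriptor carries crypto_shash_descsize() bytes of per-request
	 * state, e.g. struct padlock_sha_desc for the driver above. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_digest(desc, data, len, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}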


@ -86,6 +86,25 @@ struct talitos_request {
void *context; void *context;
}; };
/* per-channel fifo management */
struct talitos_channel {
/* request fifo */
struct talitos_request *fifo;
/* number of requests pending in channel h/w fifo */
atomic_t submit_count ____cacheline_aligned;
/* request submission (head) lock */
spinlock_t head_lock ____cacheline_aligned;
/* index to next free descriptor request */
int head;
/* request release (tail) lock */
spinlock_t tail_lock ____cacheline_aligned;
/* index to next in-progress/done descriptor request */
int tail;
};
struct talitos_private { struct talitos_private {
struct device *dev; struct device *dev;
struct of_device *ofdev; struct of_device *ofdev;
@ -101,15 +120,6 @@ struct talitos_private {
/* SEC Compatibility info */ /* SEC Compatibility info */
unsigned long features; unsigned long features;
/* next channel to be assigned next incoming descriptor */
atomic_t last_chan;
/* per-channel number of requests pending in channel h/w fifo */
atomic_t *submit_count;
/* per-channel request fifo */
struct talitos_request **fifo;
/* /*
* length of the request fifo * length of the request fifo
* fifo_len is chfifo_len rounded up to next power of 2 * fifo_len is chfifo_len rounded up to next power of 2
@ -117,15 +127,10 @@ struct talitos_private {
*/ */
unsigned int fifo_len; unsigned int fifo_len;
/* per-channel index to next free descriptor request */ struct talitos_channel *chan;
int *head;
/* per-channel index to next in-progress/done descriptor request */ /* next channel to be assigned next incoming descriptor */
int *tail; atomic_t last_chan ____cacheline_aligned;
/* per-channel request submission (head) and release (tail) locks */
spinlock_t *head_lock;
spinlock_t *tail_lock;
/* request callback tasklet */ /* request callback tasklet */
struct tasklet_struct done_task; struct tasklet_struct done_task;
@ -141,6 +146,12 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
}
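A brief worked example (not from the patch) of what to_talitos_ptr() does with a 36-bit bus address; the address value is made up.

/* Hypothetical address 0x9_2345_6780 (36 bits wide):
 *   upper_32_bits(0x923456780ULL) == 0x00000009  -> written to eptr
 *   lower_32_bits(0x923456780ULL) == 0x23456780  -> written to ptr
 * Both halves are stored big-endian, which is what the SEC expects once
 * extended addressing (the EAE bit set in reset_channel() below) is on. */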
/* /*
* map virtual single (contiguous) pointer to h/w descriptor pointer * map virtual single (contiguous) pointer to h/w descriptor pointer
*/ */
@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
unsigned char extent, unsigned char extent,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
talitos_ptr->len = cpu_to_be16(len); talitos_ptr->len = cpu_to_be16(len);
talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); to_talitos_ptr(talitos_ptr, dma_addr);
talitos_ptr->j_extent = extent; talitos_ptr->j_extent = extent;
} }
@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
return -EIO; return -EIO;
} }
/* set done writeback and IRQ */ /* set 36-bit addressing, done writeback enable and done IRQ enable */
setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDIE); TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */ /* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
/* emulate SEC's round-robin channel fifo polling scheme */ /* emulate SEC's round-robin channel fifo polling scheme */
ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
spin_lock_irqsave(&priv->head_lock[ch], flags); spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
if (!atomic_inc_not_zero(&priv->submit_count[ch])) { if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
/* h/w fifo is full */ /* h/w fifo is full */
spin_unlock_irqrestore(&priv->head_lock[ch], flags); spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EAGAIN; return -EAGAIN;
} }
head = priv->head[ch]; head = priv->chan[ch].head;
request = &priv->fifo[ch][head]; request = &priv->chan[ch].fifo[head];
/* map descriptor and save caller data */ /* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
request->context = context; request->context = context;
/* increment fifo head */ /* increment fifo head */
priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
smp_wmb(); smp_wmb();
request->desc = desc; request->desc = desc;
/* GO! */ /* GO! */
wmb(); wmb();
out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); out_be32(priv->reg + TALITOS_FF(ch),
cpu_to_be32(upper_32_bits(request->dma_desc)));
out_be32(priv->reg + TALITOS_FF_LO(ch),
cpu_to_be32(lower_32_bits(request->dma_desc)));
spin_unlock_irqrestore(&priv->head_lock[ch], flags); spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EINPROGRESS; return -EINPROGRESS;
} }
@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
unsigned long flags; unsigned long flags;
int tail, status; int tail, status;
spin_lock_irqsave(&priv->tail_lock[ch], flags); spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
tail = priv->tail[ch]; tail = priv->chan[ch].tail;
while (priv->fifo[ch][tail].desc) { while (priv->chan[ch].fifo[tail].desc) {
request = &priv->fifo[ch][tail]; request = &priv->chan[ch].fifo[tail];
/* descriptors with their done bits set don't get the error */ /* descriptors with their done bits set don't get the error */
rmb(); rmb();
@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
request->desc = NULL; request->desc = NULL;
/* increment fifo tail */ /* increment fifo tail */
priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
spin_unlock_irqrestore(&priv->tail_lock[ch], flags); spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
atomic_dec(&priv->submit_count[ch]); atomic_dec(&priv->chan[ch].submit_count);
saved_req.callback(dev, saved_req.desc, saved_req.context, saved_req.callback(dev, saved_req.desc, saved_req.context,
status); status);
/* channel may resume processing in single desc error case */ /* channel may resume processing in single desc error case */
if (error && !reset_ch && status == error) if (error && !reset_ch && status == error)
return; return;
spin_lock_irqsave(&priv->tail_lock[ch], flags); spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
tail = priv->tail[ch]; tail = priv->chan[ch].tail;
} }
spin_unlock_irqrestore(&priv->tail_lock[ch], flags); spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
} }
/* /*
@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
static struct talitos_desc *current_desc(struct device *dev, int ch) static struct talitos_desc *current_desc(struct device *dev, int ch)
{ {
struct talitos_private *priv = dev_get_drvdata(dev); struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->tail[ch]; int tail = priv->chan[ch].tail;
dma_addr_t cur_desc; dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
while (priv->fifo[ch][tail].dma_desc != cur_desc) { while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1); tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->tail[ch]) { if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n"); dev_err(dev, "couldn't locate current descriptor\n");
return NULL; return NULL;
} }
} }
return priv->fifo[ch][tail].desc; return priv->chan[ch].fifo[tail].desc;
} }
/* /*
@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
int n_sg = sg_count; int n_sg = sg_count;
while (n_sg--) { while (n_sg--) {
link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
link_tbl_ptr->j_extent = 0; link_tbl_ptr->j_extent = 0;
link_tbl_ptr++; link_tbl_ptr++;
@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_desc *desc = &edesc->desc; struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen; unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize; unsigned int authsize = ctx->authsize;
unsigned int ivsize; unsigned int ivsize = crypto_aead_ivsize(aead);
int sg_count, ret; int sg_count, ret;
int sg_link_tbl_len; int sg_link_tbl_len;
@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE); 0, DMA_TO_DEVICE);
/* hmac data */ /* hmac data */
map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
sg_virt(areq->assoc), sg_virt(areq->assoc), 0, sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
DMA_TO_DEVICE);
/* cipher iv */ /* cipher iv */
ivsize = crypto_aead_ivsize(aead);
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE); DMA_TO_DEVICE);
@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->src_is_chained); edesc->src_is_chained);
if (sg_count == 1) { if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
} else { } else {
sg_link_tbl_len = cryptlen; sg_link_tbl_len = cryptlen;
@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
&edesc->link_tbl[0]); &edesc->link_tbl[0]);
if (sg_count > 1) { if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl, dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, edesc->dma_len,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} else { } else {
/* Only one segment now, so no link tbl needed */ /* Only one segment now, so no link tbl needed */
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> to_talitos_ptr(&desc->ptr[4],
src)); sg_dma_address(areq->src));
} }
} }
@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_is_chained); edesc->dst_is_chained);
if (sg_count == 1) { if (sg_count == 1) {
desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else { } else {
struct talitos_ptr *link_tbl_ptr = struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1]; &edesc->link_tbl[edesc->src_nents + 1];
desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
edesc->dma_link_tbl + (edesc->src_nents + 1) *
edesc->src_nents + 1); sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr); link_tbl_ptr);
@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->len = cpu_to_be16(authsize); link_tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */ /* icv data follows link tables */
link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
edesc->dma_link_tbl + (edesc->src_nents + edesc->dst_nents + 2) *
edesc->src_nents + sizeof(struct talitos_ptr));
edesc->dst_nents + 2);
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL); edesc->dma_len, DMA_BIDIRECTIONAL);
@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* first DWORD empty */ /* first DWORD empty */
desc->ptr[0].len = 0; desc->ptr[0].len = 0;
desc->ptr[0].ptr = 0; to_talitos_ptr(&desc->ptr[0], 0);
desc->ptr[0].j_extent = 0; desc->ptr[0].j_extent = 0;
/* cipher iv */ /* cipher iv */
@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->src_is_chained); edesc->src_is_chained);
if (sg_count == 1) { if (sg_count == 1) {
desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
} else { } else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]); &edesc->link_tbl[0]);
if (sg_count > 1) { if (sg_count > 1) {
to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl, dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, edesc->dma_len,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} else { } else {
/* Only one segment now, so no link tbl needed */ /* Only one segment now, so no link tbl needed */
desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> to_talitos_ptr(&desc->ptr[3],
src)); sg_dma_address(areq->src));
} }
} }
@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->dst_is_chained); edesc->dst_is_chained);
if (sg_count == 1) { if (sg_count == 1) {
desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
} else { } else {
struct talitos_ptr *link_tbl_ptr = struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1]; &edesc->link_tbl[edesc->src_nents + 1];
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
(edesc->src_nents + 1) *
sizeof(struct talitos_ptr));
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
edesc->dma_link_tbl +
edesc->src_nents + 1);
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr); link_tbl_ptr);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */ /* last DWORD empty */
desc->ptr[6].len = 0; desc->ptr[6].len = 0;
desc->ptr[6].ptr = 0; to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0; desc->ptr[6].j_extent = 0;
ret = talitos_submit(dev, desc, callback, areq); ret = talitos_submit(dev, desc, callback, areq);
@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG)) if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev); talitos_unregister_rng(dev);
kfree(priv->submit_count);
kfree(priv->tail);
kfree(priv->head);
if (priv->fifo)
for (i = 0; i < priv->num_channels; i++) for (i = 0; i < priv->num_channels; i++)
kfree(priv->fifo[i]); if (priv->chan[i].fifo)
kfree(priv->chan[i].fifo);
kfree(priv->fifo); kfree(priv->chan);
kfree(priv->head_lock);
kfree(priv->tail_lock);
if (priv->irq != NO_IRQ) { if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev); free_irq(priv->irq, dev);
@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,sec2.1")) if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK; priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, priv->chan = kzalloc(sizeof(struct talitos_channel) *
GFP_KERNEL); priv->num_channels, GFP_KERNEL);
priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, if (!priv->chan) {
GFP_KERNEL); dev_err(dev, "failed to allocate channel management space\n");
if (!priv->head_lock || !priv->tail_lock) {
dev_err(dev, "failed to allocate fifo locks\n");
err = -ENOMEM; err = -ENOMEM;
goto err_out; goto err_out;
} }
for (i = 0; i < priv->num_channels; i++) { for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->head_lock[i]); spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->tail_lock[i]); spin_lock_init(&priv->chan[i].tail_lock);
}
priv->fifo = kmalloc(sizeof(struct talitos_request *) *
priv->num_channels, GFP_KERNEL);
if (!priv->fifo) {
dev_err(dev, "failed to allocate request fifo\n");
err = -ENOMEM;
goto err_out;
} }
priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
for (i = 0; i < priv->num_channels; i++) { for (i = 0; i < priv->num_channels; i++) {
priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
priv->fifo_len, GFP_KERNEL); priv->fifo_len, GFP_KERNEL);
if (!priv->fifo[i]) { if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i); dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM; err = -ENOMEM;
goto err_out; goto err_out;
} }
} }
priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
GFP_KERNEL);
if (!priv->submit_count) {
dev_err(dev, "failed to allocate fifo submit count space\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) for (i = 0; i < priv->num_channels; i++)
atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); atomic_set(&priv->chan[i].submit_count,
-(priv->chfifo_len - 1));
priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); dma_set_mask(dev, DMA_BIT_MASK(36));
priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
if (!priv->head || !priv->tail) {
dev_err(dev, "failed to allocate request index space\n");
err = -ENOMEM;
goto err_out;
}
/* reset and initialize the h/w */ /* reset and initialize the h/w */
err = init_device(dev); err = init_device(dev);


@ -57,6 +57,7 @@
#define TALITOS_CCCR_RESET 0x1 /* channel reset */ #define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) #define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */ #define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */


@ -22,11 +22,9 @@ struct seq_file;
struct crypto_type { struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg, unsigned int (*extsize)(struct crypto_alg *alg);
const struct crypto_type *frontend);
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm, int (*init_tfm)(struct crypto_tfm *tfm);
const struct crypto_type *frontend);
void (*show)(struct seq_file *m, struct crypto_alg *alg); void (*show)(struct seq_file *m, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
@ -52,6 +50,7 @@ struct crypto_template {
struct crypto_instance *(*alloc)(struct rtattr **tb); struct crypto_instance *(*alloc)(struct rtattr **tb);
void (*free)(struct crypto_instance *inst); void (*free)(struct crypto_instance *inst);
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME]; char name[CRYPTO_MAX_ALG_NAME];
}; };
@ -60,6 +59,7 @@ struct crypto_spawn {
struct list_head list; struct list_head list;
struct crypto_alg *alg; struct crypto_alg *alg;
struct crypto_instance *inst; struct crypto_instance *inst;
const struct crypto_type *frontend;
u32 mask; u32 mask;
}; };
@ -114,11 +114,19 @@ int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl); void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name); struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask); struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend);
void crypto_drop_spawn(struct crypto_spawn *spawn); void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask); u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
static inline void crypto_set_spawn(struct crypto_spawn *spawn, static inline void crypto_set_spawn(struct crypto_spawn *spawn,
struct crypto_instance *inst) struct crypto_instance *inst)
@ -129,8 +137,19 @@ static inline void crypto_set_spawn(struct crypto_spawn *spawn,
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type); int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta); const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
const struct crypto_type *frontend,
u32 type, u32 mask);
static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
u32 type, u32 mask)
{
return crypto_attr_alg2(rta, NULL, type, mask);
}
int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_attr_u32(struct rtattr *rta, u32 *num);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg); struct crypto_alg *alg);
@ -157,12 +176,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{ {
unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); return PTR_ALIGN(crypto_tfm_ctx(tfm),
unsigned long align = crypto_tfm_alg_alignmask(tfm); crypto_tfm_alg_alignmask(tfm) + 1);
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return (void *)ALIGN(addr, align);
} }
static inline struct crypto_instance *crypto_tfm_alg_instance( static inline struct crypto_instance *crypto_tfm_alg_instance(


@ -7,6 +7,7 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <crypto/hash.h>
struct cryptd_ablkcipher { struct cryptd_ablkcipher {
struct crypto_ablkcipher base; struct crypto_ablkcipher base;
@ -24,4 +25,20 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
struct cryptd_ahash {
struct crypto_ahash base;
};
static inline struct cryptd_ahash *__cryptd_ahash_cast(
struct crypto_ahash *tfm)
{
return (struct cryptd_ahash *)tfm;
}
/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
void cryptd_free_ahash(struct cryptd_ahash *tfm);
#endif #endif
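A minimal usage sketch (not part of the header), assuming these helpers mirror the existing cryptd ablkcipher ones; the algorithm name and function name are illustrative.

#include <crypto/cryptd.h>
#include <linux/err.h>

/* Hypothetical: obtain an async ahash whose work is deferred to the
 * cryptd workqueue, backed by the synchronous "sha256" shash. */
static struct crypto_ahash *demo_get_cryptd_sha256(void)
{
	struct cryptd_ahash *ctfm;

	ctfm = cryptd_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(ctfm))
		return ERR_CAST(ctfm);

	/* &ctfm->base can be used with the regular ahash API;
	 * cryptd_ahash_child(ctfm) exposes the underlying crypto_shash,
	 * and cryptd_free_ahash(ctfm) releases both when done. */
	return &ctfm->base;
}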


@ -15,6 +15,42 @@
#include <linux/crypto.h> #include <linux/crypto.h>
struct crypto_ahash;
struct hash_alg_common {
unsigned int digestsize;
unsigned int statesize;
struct crypto_alg base;
};
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
struct scatterlist *src;
u8 *result;
/* This field may only be used by the ahash API code. */
void *priv;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
struct hash_alg_common halg;
};
struct shash_desc { struct shash_desc {
struct crypto_shash *tfm; struct crypto_shash *tfm;
u32 flags; u32 flags;
@ -24,7 +60,6 @@ struct shash_desc {
struct shash_alg { struct shash_alg {
int (*init)(struct shash_desc *desc); int (*init)(struct shash_desc *desc);
int (*reinit)(struct shash_desc *desc);
int (*update)(struct shash_desc *desc, const u8 *data, int (*update)(struct shash_desc *desc, const u8 *data,
unsigned int len); unsigned int len);
int (*final)(struct shash_desc *desc, u8 *out); int (*final)(struct shash_desc *desc, u8 *out);
@ -32,38 +67,48 @@ struct shash_alg {
unsigned int len, u8 *out); unsigned int len, u8 *out);
int (*digest)(struct shash_desc *desc, const u8 *data, int (*digest)(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out); unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key, int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen); unsigned int keylen);
unsigned int descsize; unsigned int descsize;
unsigned int digestsize;
/* These fields must match hash_alg_common. */
unsigned int digestsize
__attribute__ ((aligned(__alignof__(struct hash_alg_common))));
unsigned int statesize;
struct crypto_alg base; struct crypto_alg base;
}; };
struct crypto_ahash { struct crypto_ahash {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*finup)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int reqsize;
struct crypto_tfm base; struct crypto_tfm base;
}; };
struct crypto_shash { struct crypto_shash {
unsigned int descsize;
struct crypto_tfm base; struct crypto_tfm base;
}; };
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{ {
return (struct crypto_ahash *)tfm; return container_of(tfm, struct crypto_ahash, base);
} }
static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 type, u32 mask) u32 mask);
{
type &= ~CRYPTO_ALG_TYPE_MASK;
mask &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_AHASH;
mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{ {
@ -72,7 +117,7 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
static inline void crypto_free_ahash(struct crypto_ahash *tfm) static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{ {
crypto_free_tfm(crypto_ahash_tfm(tfm)); crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
} }
static inline unsigned int crypto_ahash_alignmask( static inline unsigned int crypto_ahash_alignmask(
@ -81,14 +126,26 @@ static inline unsigned int crypto_ahash_alignmask(
return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
} }
static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm) static inline struct hash_alg_common *__crypto_hash_alg_common(
struct crypto_alg *alg)
{ {
return &crypto_ahash_tfm(tfm)->crt_ahash; return container_of(alg, struct hash_alg_common, base);
}
static inline struct hash_alg_common *crypto_hash_alg_common(
struct crypto_ahash *tfm)
{
return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
} }
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{ {
return crypto_ahash_crt(tfm)->digestsize; return crypto_hash_alg_common(tfm)->digestsize;
}
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
return crypto_hash_alg_common(tfm)->statesize;
} }
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@ -114,7 +171,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm(
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{ {
return crypto_ahash_crt(tfm)->reqsize; return tfm->reqsize;
} }
static inline void *ahash_request_ctx(struct ahash_request *req) static inline void *ahash_request_ctx(struct ahash_request *req)
@ -122,44 +179,30 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
return req->__ctx; return req->__ctx;
} }
static inline int crypto_ahash_setkey(struct crypto_ahash *tfm, int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
const u8 *key, unsigned int keylen) unsigned int keylen);
{ int crypto_ahash_finup(struct ahash_request *req);
struct ahash_tfm *crt = crypto_ahash_crt(tfm); int crypto_ahash_final(struct ahash_request *req);
int crypto_ahash_digest(struct ahash_request *req);
return crt->setkey(tfm, key, keylen); static inline int crypto_ahash_export(struct ahash_request *req, void *out)
{
return crypto_ahash_reqtfm(req)->export(req, out);
} }
static inline int crypto_ahash_digest(struct ahash_request *req) static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{ {
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); return crypto_ahash_reqtfm(req)->import(req, in);
return crt->digest(req);
} }
static inline void crypto_ahash_export(struct ahash_request *req, u8 *out)
{
memcpy(out, ahash_request_ctx(req),
crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
}
int crypto_ahash_import(struct ahash_request *req, const u8 *in);
static inline int crypto_ahash_init(struct ahash_request *req) static inline int crypto_ahash_init(struct ahash_request *req)
{ {
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); return crypto_ahash_reqtfm(req)->init(req);
return crt->init(req);
} }
static inline int crypto_ahash_update(struct ahash_request *req) static inline int crypto_ahash_update(struct ahash_request *req)
{ {
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); return crypto_ahash_reqtfm(req)->update(req);
return crt->update(req);
}
static inline int crypto_ahash_final(struct ahash_request *req)
{
struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
return crt->final(req);
} }
static inline void ahash_request_set_tfm(struct ahash_request *req, static inline void ahash_request_set_tfm(struct ahash_request *req,
@ -184,7 +227,7 @@ static inline struct ahash_request *ahash_request_alloc(
static inline void ahash_request_free(struct ahash_request *req) static inline void ahash_request_free(struct ahash_request *req)
{ {
kfree(req); kzfree(req);
} }
static inline struct ahash_request *ahash_request_cast( static inline struct ahash_request *ahash_request_cast(
@ -251,6 +294,11 @@ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
return crypto_shash_alg(tfm)->digestsize; return crypto_shash_alg(tfm)->digestsize;
} }
static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->statesize;
}
static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{ {
return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
@ -268,7 +316,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{ {
return crypto_shash_alg(tfm)->descsize; return tfm->descsize;
} }
static inline void *shash_desc_ctx(struct shash_desc *desc) static inline void *shash_desc_ctx(struct shash_desc *desc)
@ -281,12 +329,15 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
int crypto_shash_digest(struct shash_desc *desc, const u8 *data, int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out); unsigned int len, u8 *out);
static inline void crypto_shash_export(struct shash_desc *desc, u8 *out) static inline int crypto_shash_export(struct shash_desc *desc, void *out)
{ {
memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); return crypto_shash_alg(desc->tfm)->export(desc, out);
} }
int crypto_shash_import(struct shash_desc *desc, const u8 *in); static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
return crypto_shash_alg(desc->tfm)->import(desc, in);
}
static inline int crypto_shash_init(struct shash_desc *desc) static inline int crypto_shash_init(struct shash_desc *desc)
{ {


@ -34,6 +34,22 @@ struct crypto_hash_walk {
unsigned int flags; unsigned int flags;
}; };
struct ahash_instance {
struct ahash_alg alg;
};
struct shash_instance {
struct shash_alg alg;
};
struct crypto_ahash_spawn {
struct crypto_spawn base;
};
struct crypto_shash_spawn {
struct crypto_spawn base;
};
extern const struct crypto_type crypto_ahash_type; extern const struct crypto_type crypto_ahash_type;
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
@ -43,18 +59,100 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
struct crypto_hash_walk *walk, struct crypto_hash_walk *walk,
struct scatterlist *sg, unsigned int len); struct scatterlist *sg, unsigned int len);
static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
{
return !(walk->entrylen | walk->total);
}
int crypto_register_ahash(struct ahash_alg *alg);
int crypto_unregister_ahash(struct ahash_alg *alg);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
void ahash_free_instance(struct crypto_instance *inst);
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
int crypto_register_shash(struct shash_alg *alg); int crypto_register_shash(struct shash_alg *alg);
int crypto_unregister_shash(struct shash_alg *alg); int crypto_unregister_shash(struct shash_alg *alg);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
void shash_free_instance(struct crypto_instance *inst);
int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
struct shash_alg *alg,
struct crypto_instance *inst);
static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{ {
return crypto_tfm_ctx(&tfm->base); return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
} }
static inline struct ahash_alg *crypto_ahash_alg( static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
struct crypto_ahash *tfm)
{ {
return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash; return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
halg);
}
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
tfm->reqsize = reqsize;
}
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
}
static inline struct ahash_instance *ahash_instance(
struct crypto_instance *inst)
{
return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
}
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
{
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
static inline unsigned int ahash_instance_headroom(void)
{
return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
}
static inline struct ahash_instance *ahash_alloc_instance(
const char *name, struct crypto_alg *alg)
{
return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
}
static inline struct crypto_ahash *crypto_spawn_ahash(
struct crypto_ahash_spawn *spawn)
{
return crypto_spawn_tfm2(&spawn->base);
} }
static inline int ahash_enqueue_request(struct crypto_queue *queue, static inline int ahash_enqueue_request(struct crypto_queue *queue,
@ -80,5 +178,46 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
return crypto_tfm_ctx(&tfm->base); return crypto_tfm_ctx(&tfm->base);
} }
static inline struct crypto_instance *shash_crypto_instance(
struct shash_instance *inst)
{
return container_of(&inst->alg.base, struct crypto_instance, alg);
}
static inline struct shash_instance *shash_instance(
struct crypto_instance *inst)
{
return container_of(__crypto_shash_alg(&inst->alg),
struct shash_instance, alg);
}
static inline void *shash_instance_ctx(struct shash_instance *inst)
{
return crypto_instance_ctx(shash_crypto_instance(inst));
}
static inline struct shash_instance *shash_alloc_instance(
const char *name, struct crypto_alg *alg)
{
return crypto_alloc_instance2(name, alg,
sizeof(struct shash_alg) - sizeof(*alg));
}
static inline struct crypto_shash *crypto_spawn_shash(
struct crypto_shash_spawn *spawn)
{
return crypto_spawn_tfm2(&spawn->base);
}
static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
{
return crypto_tfm_ctx_aligned(&tfm->base);
}
static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_shash, base);
}
#endif /* _CRYPTO_INTERNAL_HASH_H */ #endif /* _CRYPTO_INTERNAL_HASH_H */
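A short sketch (not part of the header) of driving a hash through the reworked ahash front end; the algorithm name, scatterlist handling and error paths are simplified and illustrative only.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical one-shot digest. A real caller must also handle
 * -EINPROGRESS/-EBUSY by waiting for the completion callback before
 * freeing the request. */
static int demo_ahash_digest(struct scatterlist *sg, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}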


@ -5,6 +5,8 @@
#ifndef _CRYPTO_SHA_H #ifndef _CRYPTO_SHA_H
#define _CRYPTO_SHA_H #define _CRYPTO_SHA_H
#include <linux/types.h>
#define SHA1_DIGEST_SIZE 20 #define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64 #define SHA1_BLOCK_SIZE 64
@ -62,4 +64,22 @@
#define SHA512_H6 0x1f83d9abfb41bd6bULL #define SHA512_H6 0x1f83d9abfb41bd6bULL
#define SHA512_H7 0x5be0cd19137e2179ULL #define SHA512_H7 0x5be0cd19137e2179ULL
struct sha1_state {
u64 count;
u32 state[SHA1_DIGEST_SIZE / 4];
u8 buffer[SHA1_BLOCK_SIZE];
};
struct sha256_state {
u64 count;
u32 state[SHA256_DIGEST_SIZE / 4];
u8 buf[SHA256_BLOCK_SIZE];
};
struct sha512_state {
u64 count[2];
u64 state[SHA512_DIGEST_SIZE / 8];
u8 buf[SHA512_BLOCK_SIZE];
};
#endif #endif
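A hypothetical illustration of what these exported state layouts enable: the hash state can be snapshotted part-way through a message and resumed later, which is exactly what the padlock driver above does with its fallback. Names and message contents are made up.

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Assumes a sha1 implementation whose statesize is sizeof(struct sha1_state),
 * as is the case for the drivers converted in this series. */
static int demo_sha1_fork(struct shash_desc *desc, u8 *out1, u8 *out2)
{
	struct sha1_state state;
	int err;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, (const u8 *)"hello", 5) ?:
	      crypto_shash_export(desc, &state);	/* snapshot after "hello" */
	if (err)
		return err;

	err = crypto_shash_finup(desc, (const u8 *)" world", 6, out1);
	if (err)
		return err;

	err = crypto_shash_import(desc, &state);	/* rewind to the snapshot */
	if (err)
		return err;
	return crypto_shash_finup(desc, (const u8 *)" kernel", 7, out2);
}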

include/crypto/vmac.h (new file, 61 lines)

@ -0,0 +1,61 @@
/*
* Modified to interface to the Linux kernel
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#ifndef __CRYPTO_VMAC_H
#define __CRYPTO_VMAC_H
/* --------------------------------------------------------------------------
* VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
* This implementation is herby placed in the public domain.
* The authors offers no warranty. Use at your own risk.
* Please send bug reports to the authors.
* Last modified: 17 APR 08, 1700 PDT
* ----------------------------------------------------------------------- */
/*
* User definable settings.
*/
#define VMAC_TAG_LEN 64
#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
/*
* This implementation uses u32 and u64 as names for unsigned 32-
* and 64-bit integer types. These are defined in C99 stdint.h. The
* following may need adaptation if you are not running a C99 or
* Microsoft C environment.
*/
struct vmac_ctx {
u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
u64 polykey[2*VMAC_TAG_LEN/64];
u64 l3key[2*VMAC_TAG_LEN/64];
u64 polytmp[2*VMAC_TAG_LEN/64];
u64 cached_nonce[2];
u64 cached_aes[2];
int first_block_processed;
};
typedef u64 vmac_t;
struct vmac_ctx_t {
struct crypto_cipher *child;
struct vmac_ctx __vmac_ctx;
};
#endif /* __CRYPTO_VMAC_H */


@ -115,7 +115,6 @@ struct crypto_async_request;
struct crypto_aead; struct crypto_aead;
struct crypto_blkcipher; struct crypto_blkcipher;
struct crypto_hash; struct crypto_hash;
struct crypto_ahash;
struct crypto_rng; struct crypto_rng;
struct crypto_tfm; struct crypto_tfm;
struct crypto_type; struct crypto_type;
@ -146,16 +145,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR; void *__ctx[] CRYPTO_MINALIGN_ATTR;
}; };
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
struct scatterlist *src;
u8 *result;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/** /**
* struct aead_request - AEAD request * struct aead_request - AEAD request
* @base: Common attributes for async crypto requests * @base: Common attributes for async crypto requests
@ -220,18 +209,6 @@ struct ablkcipher_alg {
unsigned int ivsize; unsigned int ivsize;
}; };
struct ahash_alg {
int (*init)(struct ahash_request *req);
int (*reinit)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
};
struct aead_alg { struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key, int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen); unsigned int keylen);
@ -318,7 +295,6 @@ struct rng_alg {
#define cra_cipher cra_u.cipher #define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest #define cra_digest cra_u.digest
#define cra_hash cra_u.hash #define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress #define cra_compress cra_u.compress
#define cra_rng cra_u.rng #define cra_rng cra_u.rng
@ -346,7 +322,6 @@ struct crypto_alg {
struct cipher_alg cipher; struct cipher_alg cipher;
struct digest_alg digest; struct digest_alg digest;
struct hash_alg hash; struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress; struct compress_alg compress;
struct rng_alg rng; struct rng_alg rng;
} cra_u; } cra_u;
@ -433,18 +408,6 @@ struct hash_tfm {
unsigned int digestsize; unsigned int digestsize;
}; };
struct ahash_tfm {
int (*init)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
unsigned int reqsize;
};
struct compress_tfm { struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm, int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen, const u8 *src, unsigned int slen,
@ -465,7 +428,6 @@ struct rng_tfm {
#define crt_blkcipher crt_u.blkcipher #define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher #define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash #define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress #define crt_compress crt_u.compress
#define crt_rng crt_u.rng #define crt_rng crt_u.rng
@ -479,7 +441,6 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher; struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher; struct cipher_tfm cipher;
struct hash_tfm hash; struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress; struct compress_tfm compress;
struct rng_tfm rng; struct rng_tfm rng;
} crt_u; } crt_u;
@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
static inline void ablkcipher_request_free(struct ablkcipher_request *req) static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{ {
kfree(req); kzfree(req);
} }
static inline void ablkcipher_request_set_callback( static inline void ablkcipher_request_set_callback(
@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
static inline void aead_request_free(struct aead_request *req) static inline void aead_request_free(struct aead_request *req)
{ {
kfree(req); kzfree(req);
} }
static inline void aead_request_set_callback(struct aead_request *req, static inline void aead_request_set_callback(struct aead_request *req,

include/linux/fips.h (new file, 10 lines)

@ -0,0 +1,10 @@
#ifndef _FIPS_H
#define _FIPS_H
#ifdef CONFIG_CRYPTO_FIPS
extern int fips_enabled;
#else
#define fips_enabled 0
#endif
#endif
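A tiny usage sketch (not part of the header): code that must behave differently under FIPS mode simply tests the flag; the specific check below is illustrative.

#include <linux/fips.h>
#include <linux/string.h>

/* Hypothetical gate: refuse to instantiate a non-approved digest when the
 * kernel was booted with fips=1. With CONFIG_CRYPTO_FIPS off, fips_enabled
 * is the constant 0 and the branch compiles away. */
static int demo_check_alg(const char *alg_name)
{
	if (fips_enabled && !strcmp(alg_name, "md5"))
		return -EINVAL;
	return 0;
}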