Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:

 - fix crashes in skcipher/shash from zero-length input

 - fix softirq GFP_KERNEL allocation in shash_setkey_unaligned

 - error path bug fix in xts create function

 - fix compiler warning regressions in axis and stm32

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: shash - Fix zero-length shash ahash digest crash
  crypto: skcipher - Fix crash on zero-length input
  crypto: shash - Fix a sleep-in-atomic bug in shash_setkey_unaligned
  crypto: xts - Fix an error handling path in 'create()'
  crypto: stm32 - Try to fix hash padding
  crypto: axis - hide an unused variable
commit 73a752cce2
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 	int err;
 
 	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-	buffer = kmalloc(absize, GFP_KERNEL);
+	buffer = kmalloc(absize, GFP_ATOMIC);
 	if (!buffer)
 		return -ENOMEM;
 
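The hunk above is the sleep-in-atomic fix: setkey can be reached from
softirq context, where a GFP_KERNEL allocation may sleep inside the
allocator and trigger a scheduling-while-atomic bug. A minimal sketch of
the constraint, not taken from the patch (setkey_tmp_buf is a
hypothetical helper):

#include <linux/slab.h>

/*
 * GFP_KERNEL lets the allocator sleep to reclaim memory, which is
 * forbidden in softirq/atomic context. GFP_ATOMIC never sleeps but
 * fails more readily under pressure, so the !buffer -> -ENOMEM
 * fallback in the hunk above stays load-bearing.
 */
static void *setkey_tmp_buf(size_t absize)
{
	return kmalloc(absize, GFP_ATOMIC);	/* safe in atomic context */
}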
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
 
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-	struct scatterlist *sg = req->src;
-	unsigned int offset = sg->offset;
 	unsigned int nbytes = req->nbytes;
+	struct scatterlist *sg;
+	unsigned int offset;
 	int err;
 
-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+	if (nbytes &&
+	    (sg = req->src, offset = sg->offset,
+	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
 		data = kmap_atomic(sg_page(sg));
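The rewritten condition above uses a comma expression so that sg and
offset are read only once nbytes is known to be non-zero, because a
zero-length digest request may carry a NULL src scatterlist. A hedged
caller-side sketch of the crash scenario (the request setup is
illustrative; the two calls are the regular kernel ahash API):

/* Hash an empty message: with nbytes == 0 a caller may legitimately
 * pass a NULL src scatterlist. Before the fix, shash_ahash_digest()
 * read sg->offset unconditionally and oopsed on that NULL pointer. */
ahash_request_set_crypt(req, NULL, digest_out, 0);
err = crypto_ahash_digest(req);		/* must not touch req->src */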
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-	walk->nbytes = 0;
-
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
-	if (unlikely(!walk->total))
-		return 0;
-
 	walk->buffer = NULL;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+	walk->total = req->cryptlen;
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	walk->total = req->cryptlen;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;
 
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int err;
 
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	walk->flags &= ~SKCIPHER_WALK_PHYS;
 
 	scatterwalk_start(&walk->in, req->src);
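Taken together, the three skcipher hunks hoist the empty-request check
out of skcipher_walk_first() and place it in both callers before
scatterwalk_start(), which dereferences the scatterlist entry; note that
walk->total must be assigned before the check for it to mean anything.
A hedged caller-side sketch of the trigger (the request setup is
illustrative; the calls are the regular kernel skcipher API):

/* A zero-length skcipher request: with cryptlen == 0 a caller may
 * leave src/dst as NULL scatterlists. Before the fix the walk setup
 * reached scatterwalk_start() and crashed; now it bails out early
 * with 0 before touching either scatterlist. */
skcipher_request_set_crypt(req, NULL, NULL, 0, iv);
err = crypto_skcipher_encrypt(req);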
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ctx->name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
+			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
 	} else
 		goto err_drop_spawn;
 
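The xts hunk fixes a resource leak on the -ENAMETOOLONG path: returning
directly skipped the cleanup that drops the spawn grabbed earlier in
create(), so the error now funnels through the existing err_drop_spawn
label. A generic sketch of the unwind idiom, with hypothetical helper
names:

static int create_instance(void)
{
	int err;

	err = grab_spawn();		/* hypothetical acquire step */
	if (err)
		return err;		/* nothing to undo yet */

	if (name_too_long()) {		/* hypothetical length check */
		err = -ENAMETOOLONG;
		goto err_drop_spawn;	/* was a bare 'return' before */
	}

	return 0;

err_drop_spawn:
	drop_spawn();			/* undoes grab_spawn() */
	return err;
}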
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
 /* The crypto framework makes it hard to avoid this global. */
 static struct device *artpec6_crypto_dev;
 
-static struct dentry *dbgfs_root;
-
 #ifdef CONFIG_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
 	char *desc;
 };
 
+static struct dentry *dbgfs_root;
+
 static void artpec6_crypto_init_debugfs(void)
 {
 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
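The two axis hunks are a single move: dbgfs_root was defined near the
top of the file but is only used by the debugfs initialization code much
further down, so configurations that compile those users out presumably
saw an unused-variable warning (the commit title calls it "an unused
variable"). Defining the static next to its only users lets it disappear
together with them. A sketch of the idiom, with a hypothetical config
symbol:

#include <linux/debugfs.h>

#ifdef CONFIG_EXAMPLE_DEBUGFS		/* hypothetical config symbol */
static struct dentry *example_root;	/* lives and dies with its users */

static void example_init_debugfs(void)
{
	example_root = debugfs_create_dir("example", NULL);
}
#endif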
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
-	int err = 0, len = 0, reg, ncp;
+	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
-	const u32 *buffer = (const u32 *)rctx->buffer;
+	u32 *buffer = (void *)rctx->buffer;
 
	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
	reg |= HASH_CR_DMAA;
	stm32_hash_write(hdev, HASH_CR, reg);
 
-	for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
-		stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
-	stm32_hash_set_nblw(hdev, ncp);
+	if (ncp) {
+		memset(buffer + ncp, 0,
+		       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+		writesl(hdev->io_base + HASH_DIN, buffer,
+			DIV_ROUND_UP(ncp, sizeof(u32)));
+	}
+	stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
	reg = stm32_hash_read(hdev, HASH_STR);
	reg |= HASH_STR_DCAL;
	stm32_hash_write(hdev, HASH_STR, reg);
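The second stm32 hunk replaces a word-by-word register write loop with a
single writesl() burst and first zeroes the slack after the leftover
bytes, so the last partial word is meant to read as zero padding; NBLW
is likewise programmed from the rounded-up word count instead of the raw
byte count. A small sketch of the rounding arithmetic with illustrative
values:

/* Round a leftover byte count up to whole 32-bit words; the intent is
 * that the slack in the final word reads as zero padding. The numbers
 * are illustrative, not from the driver. */
unsigned int ncp = 13;					/* leftover bytes */
unsigned int nwords = DIV_ROUND_UP(ncp, sizeof(u32));	/* 4 words */
/* writesl() then pushes 4 words = 16 bytes to the FIFO, and NBLW is
 * programmed with the same word count. */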