crypto: xts - prefix function and struct names with "xts"

Overly-generic names can cause problems like naming collisions,
confusing crash reports, and reduced grep-ability.  E.g. see
commit d099ea6e6f ("crypto - Avoid free() namespace collision").

Clean this up for the xts template by prefixing the names with "xts_".

(I didn't use "crypto_xts_" instead because that seems overkill.)

Also constify the tfm context in a couple places, and make
xts_free_instance() use the instance context structure so that it
doesn't just assume the crypto_skcipher_spawn is at the beginning.
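
For illustration (a sketch based on the xts_free_instance() hunk below, with
the spawn as the first member of struct xts_instance_ctx), the change there
amounts to:

	/* Before: only works because the crypto_skcipher_spawn happens to
	 * be the first member of struct xts_instance_ctx, so the instance
	 * context pointer can be passed directly as the spawn pointer. */
	crypto_drop_skcipher(skcipher_instance_ctx(inst));

	/* After: name the member explicitly, which stays correct even if
	 * the structure layout changes later. */
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);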

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Eric Biggers 2020-07-10 20:34:28 -07:00 committed by Herbert Xu
parent 10f33d391e
commit a874f59104
1 changed file with 72 additions and 65 deletions


crypto/xts.c

@@ -20,7 +20,7 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-struct priv {
+struct xts_tfm_ctx {
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 };
@@ -30,17 +30,17 @@ struct xts_instance_ctx {
 	char name[CRYPTO_MAX_ALG_NAME];
 };
 
-struct rctx {
+struct xts_request_ctx {
 	le128 t;
 	struct scatterlist *tail;
 	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };
 
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-		  unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+		      unsigned int keylen)
 {
-	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 	int err;
@@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
  * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+			 bool enc)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
@@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 	return err;
 }
 
-static int xor_tweak_pre(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, false, enc);
+	return xts_xor_tweak(req, false, enc);
 }
 
-static int xor_tweak_post(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, true, enc);
+	return xts_xor_tweak(req, true, enc);
 }
 
-static void cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 	le128 b;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
 		le128_xor(&b, &rctx->t, &b);
@@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static int cts_final(struct skcipher_request *req,
-		     int (*crypt)(struct skcipher_request *req))
+static int xts_cts_final(struct skcipher_request *req,
+			 int (*crypt)(struct skcipher_request *req))
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int tail = req->cryptlen % XTS_BLOCK_SIZE;
 	le128 b[2];
@@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req,
 	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
 
 	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+				      req);
 	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
 				   XTS_BLOCK_SIZE, NULL);
 
@@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req,
 	return 0;
 }
 
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req, true);
+		err = xts_xor_tweak_post(req, true);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = cts_final(req, crypto_skcipher_encrypt);
+			err = xts_cts_final(req, crypto_skcipher_encrypt);
 			if (err == -EINPROGRESS)
 				return;
 		}
@@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req, false);
+		err = xts_xor_tweak_post(req, false);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = cts_final(req, crypto_skcipher_decrypt);
+			err = xts_cts_final(req, crypto_skcipher_decrypt);
 			if (err == -EINPROGRESS)
 				return;
 		}
@@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
+static int xts_init_crypt(struct skcipher_request *req,
+			  crypto_completion_t compl)
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct rctx *rctx = skcipher_request_ctx(req);
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 
 	if (req->cryptlen < XTS_BLOCK_SIZE)
@@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 	return 0;
 }
 
-static int encrypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int err;
 
-	err = init_crypt(req, encrypt_done) ?:
-	      xor_tweak_pre(req, true) ?:
+	err = xts_init_crypt(req, xts_encrypt_done) ?:
+	      xts_xor_tweak_pre(req, true) ?:
 	      crypto_skcipher_encrypt(subreq) ?:
-	      xor_tweak_post(req, true);
+	      xts_xor_tweak_post(req, true);
 
 	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 		return err;
 
-	return cts_final(req, crypto_skcipher_encrypt);
+	return xts_cts_final(req, crypto_skcipher_encrypt);
 }
 
-static int decrypt(struct skcipher_request *req)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int err;
 
-	err = init_crypt(req, decrypt_done) ?:
-	      xor_tweak_pre(req, false) ?:
+	err = xts_init_crypt(req, xts_decrypt_done) ?:
+	      xts_xor_tweak_pre(req, false) ?:
 	      crypto_skcipher_decrypt(subreq) ?:
-	      xor_tweak_post(req, false);
+	      xts_xor_tweak_post(req, false);
 
 	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 		return err;
 
-	return cts_final(req, crypto_skcipher_decrypt);
+	return xts_cts_final(req, crypto_skcipher_decrypt);
 }
 
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
 {
 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 
@@ -309,26 +314,28 @@ static int init_tfm(struct crypto_skcipher *tfm)
 	ctx->tweak = tweak;
 
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
-					 sizeof(struct rctx));
+					 sizeof(struct xts_request_ctx));
 
 	return 0;
 }
 
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(ctx->child);
 	crypto_free_cipher(ctx->tweak);
 }
 
-static void crypto_xts_free(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
 {
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ictx->spawn);
 	kfree(inst);
 }
 
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct skcipher_instance *inst;
 	struct xts_instance_ctx *ctx;
@@ -416,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
 
-	inst->alg.init = init_tfm;
-	inst->alg.exit = exit_tfm;
+	inst->alg.init = xts_init_tfm;
+	inst->alg.exit = xts_exit_tfm;
 
-	inst->alg.setkey = setkey;
-	inst->alg.encrypt = encrypt;
-	inst->alg.decrypt = decrypt;
+	inst->alg.setkey = xts_setkey;
+	inst->alg.encrypt = xts_encrypt;
+	inst->alg.decrypt = xts_decrypt;
 
-	inst->free = crypto_xts_free;
+	inst->free = xts_free_instance;
 
 	err = skcipher_register_instance(tmpl, inst);
 	if (err) {
err_free_inst:
-		crypto_xts_free(inst);
+		xts_free_instance(inst);
 	}
 	return err;
 }
 
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
 	.name = "xts",
-	.create = create,
+	.create = xts_create,
 	.module = THIS_MODULE,
 };
 
-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
 {
-	return crypto_register_template(&crypto_tmpl);
+	return crypto_register_template(&xts_tmpl);
 }
 
-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
 {
-	crypto_unregister_template(&crypto_tmpl);
+	crypto_unregister_template(&xts_tmpl);
 }
 
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");