blk-crypto-fallback: properly prefix function and struct names
For clarity, avoid using just the "blk_crypto_" prefix for functions
and structs that are specific to blk-crypto-fallback.  Instead, use
"blk_crypto_fallback_".  Some places already did this, but others
didn't.

This is also a prerequisite for using "struct blk_crypto_keyslot" to
mean a generic blk-crypto keyslot (which is what it sounds like).
Rename the fallback one to "struct blk_crypto_fallback_keyslot".

No change in behavior.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20211018180453.40441-2-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 435c2acb30
commit eebcafaebb
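The diff below is a mechanical rename, but the data structure it touches is easier to follow in one piece. Here is a standalone userspace model of the fallback keyslot table, reconstructed from the hunks below; the stub enum, the stub crypto_skcipher, and the table size are placeholders standing in for the kernel crypto API, so treat this as an illustrative sketch, not kernel code.

/* Standalone model of the fallback keyslot table renamed by this patch. */
#include <stdio.h>

/* Stub mode enum and tfm type; placeholders for the kernel's versions. */
enum blk_crypto_mode_num {
	BLK_ENCRYPTION_MODE_INVALID,
	BLK_ENCRYPTION_MODE_AES_256_XTS,
	BLK_ENCRYPTION_MODE_MAX,
};

struct crypto_skcipher {
	int keyed;	/* models whether crypto_skcipher_setkey() has run */
};

/* One entry per software keyslot: the mode the slot is currently keyed
 * for, plus one pre-allocated tfm per supported mode. */
static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher tfms[BLK_ENCRYPTION_MODE_MAX];
} blk_crypto_keyslots[4];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];

	/* Drop the key material and mark the slot unprogrammed. */
	slotp->tfms[slotp->crypto_mode].keyed = 0;
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int blk_crypto_fallback_keyslot_program(unsigned int slot,
					       enum blk_crypto_mode_num mode)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];

	/* A slot keyed for a different mode is evicted first, exactly as
	 * in the kernel function of the same name below. */
	if (mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = mode;
	slotp->tfms[mode].keyed = 1;	/* crypto_skcipher_setkey() in the kernel */
	return 0;
}

int main(void)
{
	blk_crypto_fallback_keyslot_program(0, BLK_ENCRYPTION_MODE_AES_256_XTS);
	printf("slot 0 keyed for mode %d\n", blk_crypto_keyslots[0].crypto_mode);
	blk_crypto_fallback_evict_keyslot(0);
	printf("slot 0 keyed for mode %d\n", blk_crypto_keyslots[0].crypto_mode);
	return 0;
}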
@@ -73,7 +73,7 @@ static mempool_t *bio_fallback_crypt_ctx_pool;
 static DEFINE_MUTEX(tfms_init_lock);
 static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
 
-static struct blk_crypto_keyslot {
+static struct blk_crypto_fallback_keyslot {
 	enum blk_crypto_mode_num crypto_mode;
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
@@ -89,9 +89,9 @@ static struct bio_set crypto_bio_split;
  */
 static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
 
-static void blk_crypto_evict_keyslot(unsigned int slot)
+static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
 {
-	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
+	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
 	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
 	int err;
 
@@ -104,34 +104,34 @@ static void blk_crypto_evict_keyslot(unsigned int slot)
 	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
 }
 
-static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
-				      const struct blk_crypto_key *key,
-				      unsigned int slot)
+static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm,
+					       const struct blk_crypto_key *key,
+					       unsigned int slot)
 {
-	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
+	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
 	const enum blk_crypto_mode_num crypto_mode =
 						key->crypto_cfg.crypto_mode;
 	int err;
 
 	if (crypto_mode != slotp->crypto_mode &&
 	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
-		blk_crypto_evict_keyslot(slot);
+		blk_crypto_fallback_evict_keyslot(slot);
 
 	slotp->crypto_mode = crypto_mode;
 	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
 				     key->size);
 	if (err) {
-		blk_crypto_evict_keyslot(slot);
+		blk_crypto_fallback_evict_keyslot(slot);
 		return err;
 	}
 	return 0;
 }
 
-static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
-				    const struct blk_crypto_key *key,
-				    unsigned int slot)
+static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm,
+					     const struct blk_crypto_key *key,
+					     unsigned int slot)
 {
-	blk_crypto_evict_keyslot(slot);
+	blk_crypto_fallback_evict_keyslot(slot);
 	return 0;
 }
 
@@ -141,8 +141,8 @@ static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
  * hardware.
  */
 static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
-	.keyslot_program	= blk_crypto_keyslot_program,
-	.keyslot_evict		= blk_crypto_keyslot_evict,
+	.keyslot_program	= blk_crypto_fallback_keyslot_program,
+	.keyslot_evict		= blk_crypto_fallback_keyslot_evict,
 };
 
 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
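These two callbacks are the only hooks the fallback exposes to the keyslot manager, so the ops table above is where the rename becomes visible to the rest of blk-crypto. For orientation, here is a hedged sketch of how such an ops table gets wired up at init time; blk_ksm_init() and the blk_keyslot_manager fields used here reflect the keyslot-manager API of this era but sit outside this diff, so treat them as assumptions.

/* Hedged sketch: wiring the fallback's ops into its keyslot manager.
 * blk_ksm_init(), ksm_ll_ops, and max_dun_bytes_supported are
 * assumptions about the contemporary keyslot-manager API; none of
 * them appear in this diff. */
static struct blk_keyslot_manager blk_crypto_ksm;

static int blk_crypto_fallback_init_ksm(unsigned int num_keyslots)
{
	int err = blk_ksm_init(&blk_crypto_ksm, num_keyslots);

	if (err)
		return err;

	/* The manager now calls back into the renamed functions above. */
	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
	return 0;
}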
@@ -160,7 +160,7 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 	bio_endio(src_bio);
 }
 
-static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
+static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
 {
 	struct bvec_iter iter;
 	struct bio_vec bv;
@@ -187,12 +187,13 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
 	return bio;
 }
 
-static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
-					struct skcipher_request **ciph_req_ret,
-					struct crypto_wait *wait)
+static bool
+blk_crypto_fallback_alloc_cipher_req(struct blk_ksm_keyslot *slot,
+				     struct skcipher_request **ciph_req_ret,
+				     struct crypto_wait *wait)
 {
 	struct skcipher_request *ciph_req;
-	const struct blk_crypto_keyslot *slotp;
+	const struct blk_crypto_fallback_keyslot *slotp;
 	int keyslot_idx = blk_ksm_get_slot_idx(slot);
 
 	slotp = &blk_crypto_keyslots[keyslot_idx];
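The renamed helper's job is simply to wrap the slot's already-keyed tfm in a request. Below is a hedged sketch of the rest of its body using the standard crypto-API calls; the _sketch suffix and the slotp parameter are mine (the real helper derives slotp from the blk_ksm_keyslot via blk_ksm_get_slot_idx(), as the hunk shows), and the real function's flags and error handling may differ in detail.

/* Hedged sketch of the helper's core: allocate an skcipher request on
 * the slot's pre-keyed tfm and point its completion at a crypto_wait.
 * Standard crypto-API pattern; details may differ from the real body. */
static bool
blk_crypto_fallback_alloc_cipher_req_sketch(const struct blk_crypto_fallback_keyslot *slotp,
					    struct skcipher_request **ciph_req_ret,
					    struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;

	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	/* Allow the request to sleep/backlog and complete into *wait. */
	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;
	return true;
}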
@@ -210,7 +211,7 @@ static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
 	return true;
 }
 
-static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
+static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
 	unsigned int i = 0;
@@ -277,7 +278,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	blk_status_t blk_st;
 
 	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_split_bio_if_needed(bio_ptr))
+	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
 		return false;
 
 	src_bio = *bio_ptr;
@@ -285,7 +286,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 
 	/* Allocate bounce bio for encryption */
-	enc_bio = blk_crypto_clone_bio(src_bio);
+	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
 	if (!enc_bio) {
 		src_bio->bi_status = BLK_STS_RESOURCE;
 		return false;
@@ -302,7 +303,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	}
 
 	/* and then allocate an skcipher_request for it */
-	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
 		src_bio->bi_status = BLK_STS_RESOURCE;
 		goto out_release_keyslot;
 	}
@@ -404,7 +405,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 	}
 
 	/* and then allocate an skcipher_request for it */
-	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
+	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
 		bio->bi_status = BLK_STS_RESOURCE;
 		goto out;
 	}
@@ -474,9 +475,9 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
  * @bio_ptr: pointer to the bio to prepare
  *
  * If bio is doing a WRITE operation, this splits the bio into two parts if it's
- * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio
- * for the first part, encrypts it, and update bio_ptr to point to the bounce
- * bio.
+ * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
+ * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
+ * the bounce bio.
  *
  * For a READ operation, we mark the bio for decryption by using bi_private and
  * bi_end_io.
@@ -611,7 +612,7 @@ out:
 int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
 {
 	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
-	struct blk_crypto_keyslot *slotp;
+	struct blk_crypto_fallback_keyslot *slotp;
 	unsigned int i;
 	int err = 0;
 
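The hunk above touches blk_crypto_fallback_start_using_mode(), which is where the renamed struct's tfms[] array actually gets populated. Here is a hedged sketch of that allocation loop as a continuation of the function body shown above (it reuses the cipher_str, slotp, i, and err locals from the hunk); blk_crypto_num_keyslots and the exact error paths are assumptions beyond what this diff shows, so this is a simplification, not the verbatim function.

	/* Hedged sketch: lazily allocate one tfm of this mode's cipher for
	 * every fallback keyslot, serialized by tfms_init_lock. Simplified;
	 * blk_crypto_num_keyslots and the error handling are assumptions. */
	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			slotp->tfms[mode_num] = NULL;
			goto out;
		}
		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}
	tfms_inited[mode_num] = true;
out:
	mutex_unlock(&tfms_init_lock);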