From df7b54f1d983ca41e7b2add09664b1da128f3424 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=BD=D0=B0=D0=B1?=
Date: Sat, 25 Dec 2021 04:34:29 +0100
Subject: [PATCH] module: icp: rip out insane crypto_req_handle_t mechanism,
 inline KM_SLEEP
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Brian Behlendorf
Signed-off-by: Ahelenia Ziemiańska
Closes #12901
---
 include/sys/crypto/api.h | 15 +--
 module/icp/algs/modes/gcm.c | 14 +--
 module/icp/api/kcf_cipher.c | 22 ++--
 module/icp/api/kcf_ctxops.c | 7 +-
 module/icp/api/kcf_mac.c | 39 +++-----
 module/icp/core/kcf_sched.c | 6 +-
 module/icp/include/modes/modes.h | 6 --
 module/icp/include/sys/crypto/impl.h | 41 ++++----
 module/icp/include/sys/crypto/sched_impl.h | 24 +----
 module/icp/include/sys/crypto/spi.h | 59 +++++-------
 module/icp/io/aes.c | 107 +++++++++------
 module/icp/io/sha2_mod.c | 79 ++++++---------
 module/icp/io/skein_mod.c | 69 ++++++-------
 module/icp/spi/kcf_spi.c | 12 ---
 module/os/linux/zfs/zio_crypt.c | 22 ++---
 module/zfs/hkdf.c | 4 +-
 16 files changed, 187 insertions(+), 339 deletions(-)

diff --git a/include/sys/crypto/api.h b/include/sys/crypto/api.h
index b2106e0c89..b3d6c9c071 100644
--- a/include/sys/crypto/api.h
+++ b/include/sys/crypto/api.h
@@ -36,8 +36,6 @@ extern "C" {
 typedef void *crypto_context_t;
 typedef void *crypto_ctx_template_t;
 
-typedef struct {} crypto_call_req_t;
-
 /*
  * Returns the mechanism type corresponding to a mechanism name.
  */
@@ -48,17 +46,16 @@ extern crypto_mech_type_t crypto_mech2id(const char *name);
  * Create and destroy context templates.
  */
 extern int crypto_create_ctx_template(crypto_mechanism_t *mech,
-    crypto_key_t *key, crypto_ctx_template_t *tmpl, int kmflag);
+    crypto_key_t *key, crypto_ctx_template_t *tmpl);
 extern void crypto_destroy_ctx_template(crypto_ctx_template_t tmpl);
 
 /*
  * Single and multi-part MAC operations.
  */
 extern int crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
-    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
-    crypto_call_req_t *cr);
+    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac);
 extern int crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
-    crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *cr);
+    crypto_ctx_template_t tmpl, crypto_context_t *ctxp);
 extern int crypto_mac_update(crypto_context_t ctx, crypto_data_t *data);
 extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data);
 
@@ -66,11 +63,9 @@ extern int crypto_mac_final(crypto_context_t ctx, crypto_data_t *data);
  * Single-part encryption/decryption operations.
*/ extern int crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext, - crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext, - crypto_call_req_t *cr); + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext); extern int crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext, - crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext, - crypto_call_req_t *cr); + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext); #ifdef __cplusplus } diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c index 8b3793daa5..7d34c2b040 100644 --- a/module/icp/algs/modes/gcm.c +++ b/module/icp/algs/modes/gcm.c @@ -342,7 +342,7 @@ gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, */ if (length > 0) { new_len = ctx->gcm_pt_buf_len + length; - new = vmem_alloc(new_len, ctx->gcm_kmflag); + new = vmem_alloc(new_len, KM_SLEEP); if (new == NULL) { vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); ctx->gcm_pt_buf = NULL; @@ -654,7 +654,7 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, } gcm_ctx->gcm_htab_len = htab_len; gcm_ctx->gcm_Htable = - (uint64_t *)kmem_alloc(htab_len, gcm_ctx->gcm_kmflag); + (uint64_t *)kmem_alloc(htab_len, KM_SLEEP); if (gcm_ctx->gcm_Htable == NULL) { return (CRYPTO_HOST_MEMORY); @@ -729,7 +729,7 @@ gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, } gcm_ctx->gcm_htab_len = htab_len; gcm_ctx->gcm_Htable = - (uint64_t *)kmem_alloc(htab_len, gcm_ctx->gcm_kmflag); + (uint64_t *)kmem_alloc(htab_len, KM_SLEEP); if (gcm_ctx->gcm_Htable == NULL) { return (CRYPTO_HOST_MEMORY); @@ -780,12 +780,6 @@ gmac_alloc_ctx(int kmflag) return (gcm_ctx); } -void -gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag) -{ - ctx->gcm_kmflag = kmflag; -} - /* GCM implementation that contains the fastest methods */ static gcm_impl_ops_t gcm_fastest_impl = { .name = "fastest" @@ -1212,7 +1206,7 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data, /* Allocate a buffer to encrypt to if there is enough input. */ if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) { - ct_buf = vmem_alloc(chunk_size, ctx->gcm_kmflag); + ct_buf = vmem_alloc(chunk_size, KM_SLEEP); if (ct_buf == NULL) { return (CRYPTO_HOST_MEMORY); } diff --git a/module/icp/api/kcf_cipher.c b/module/icp/api/kcf_cipher.c index 963e94bfd3..e192a6e19f 100644 --- a/module/icp/api/kcf_cipher.c +++ b/module/icp/api/kcf_cipher.c @@ -52,7 +52,6 @@ * tmpl: a crypto_ctx_template_t, opaque template of a context of an * encryption with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). - * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs a @@ -62,16 +61,12 @@ * message. * Relies on the KCF scheduler to pick a provider. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'cr'. - * * Returns: * See comment in the beginning of the file. 
*/ int crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext, - crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext, - crypto_call_req_t *crq) + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *ciphertext) { int error; kcf_mech_entry_t *me; @@ -95,12 +90,12 @@ retry: crypto_mechanism_t lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); error = KCF_PROV_ENCRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key, - plaintext, ciphertext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + plaintext, ciphertext, spi_ctx_tmpl); KCF_PROV_INCRSTATS(pd, error); if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ - if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + if (kcf_insert_triedlist(&list, pd, KM_SLEEP) != NULL) goto retry; } @@ -129,7 +124,6 @@ retry: * tmpl: a crypto_ctx_template_t, opaque template of a context of an * encryption with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). - * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs a @@ -139,16 +133,12 @@ retry: * message. * Relies on the KCF scheduler to choose a provider. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'cr'. - * * Returns: * See comment in the beginning of the file. */ int crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext, - crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext, - crypto_call_req_t *crq) + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *plaintext) { int error; kcf_mech_entry_t *me; @@ -173,12 +163,12 @@ retry: KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid, &lmech, key, - ciphertext, plaintext, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + ciphertext, plaintext, spi_ctx_tmpl); KCF_PROV_INCRSTATS(pd, error); if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ - if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + if (kcf_insert_triedlist(&list, pd, KM_SLEEP) != NULL) goto retry; } diff --git a/module/icp/api/kcf_ctxops.c b/module/icp/api/kcf_ctxops.c index 85cc55c8a7..67bf76a8f1 100644 --- a/module/icp/api/kcf_ctxops.c +++ b/module/icp/api/kcf_ctxops.c @@ -48,7 +48,6 @@ * ptmpl: a storage for the opaque crypto_ctx_template_t, allocated and * initialized by the software provider this routine is * dispatched to. - * kmflag: KM_SLEEP/KM_NOSLEEP mem. alloc. flag. 
* * Description: * Redirects the call to the software provider of the specified @@ -69,7 +68,7 @@ */ int crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key, - crypto_ctx_template_t *ptmpl, int kmflag) + crypto_ctx_template_t *ptmpl) { int error; kcf_mech_entry_t *me; @@ -90,7 +89,7 @@ crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key, return (error); if ((ctx_tmpl = (kcf_ctx_template_t *)kmem_alloc( - sizeof (kcf_ctx_template_t), kmflag)) == NULL) { + sizeof (kcf_ctx_template_t), KM_SLEEP)) == NULL) { KCF_PROV_REFRELE(pd); return (CRYPTO_HOST_MEMORY); } @@ -101,7 +100,7 @@ crypto_create_ctx_template(crypto_mechanism_t *mech, crypto_key_t *key, prov_mech.cm_param_len = mech->cm_param_len; error = KCF_PROV_CREATE_CTX_TEMPLATE(pd, &prov_mech, key, - &(ctx_tmpl->ct_prov_tmpl), &(ctx_tmpl->ct_size), KCF_RHNDL(kmflag)); + &(ctx_tmpl->ct_prov_tmpl), &(ctx_tmpl->ct_size)); if (error == CRYPTO_SUCCESS) { *ptmpl = ctx_tmpl; diff --git a/module/icp/api/kcf_mac.c b/module/icp/api/kcf_mac.c index 0249fe87fc..6766c61cfa 100644 --- a/module/icp/api/kcf_mac.c +++ b/module/icp/api/kcf_mac.c @@ -67,7 +67,6 @@ * tmpl: a crypto_ctx_template_t, opaque template of a context of a * MAC with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). - * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs a @@ -78,16 +77,12 @@ * authentication code. * Relies on the KCF scheduler to choose a provider. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'crq'. - * * Returns: * See comment in the beginning of the file. */ int crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data, - crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac, - crypto_call_req_t *crq) + crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac) { int error; kcf_mech_entry_t *me; @@ -111,12 +106,12 @@ retry: crypto_mechanism_t lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); error = KCF_PROV_MAC_ATOMIC(pd, pd->pd_sid, &lmech, key, data, - mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); + mac, spi_ctx_tmpl); KCF_PROV_INCRSTATS(pd, error); if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ - if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + if (kcf_insert_triedlist(&list, pd, KM_SLEEP) != NULL) goto retry; } @@ -143,7 +138,6 @@ retry: * MAC with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). * ctxp: Pointer to a crypto_context_t. - * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs the @@ -156,16 +150,13 @@ retry: * The caller should hold a reference on the specified provider * descriptor before calling this function. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'cr'. - * * Returns: * See comment in the beginning of the file. 
*/ static int crypto_mac_init_prov(kcf_provider_desc_t *pd, crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl, - crypto_context_t *ctxp, crypto_call_req_t *crq) + crypto_context_t *ctxp) { int rv; crypto_ctx_t *ctx; @@ -174,13 +165,12 @@ crypto_mac_init_prov(kcf_provider_desc_t *pd, ASSERT(KCF_PROV_REFHELD(pd)); /* Allocate and initialize the canonical context */ - if ((ctx = kcf_new_ctx(crq, real_provider)) == NULL) + if ((ctx = kcf_new_ctx(real_provider)) == NULL) return (CRYPTO_HOST_MEMORY); crypto_mechanism_t lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); - rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl, - KCF_SWFP_RHNDL(crq)); + rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl); KCF_PROV_INCRSTATS(pd, rv); if (rv == CRYPTO_SUCCESS) @@ -200,8 +190,7 @@ crypto_mac_init_prov(kcf_provider_desc_t *pd, */ int crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key, - crypto_ctx_template_t tmpl, crypto_context_t *ctxp, - crypto_call_req_t *crq) + crypto_ctx_template_t tmpl, crypto_context_t *ctxp) { int error; kcf_mech_entry_t *me; @@ -230,10 +219,10 @@ retry: spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; error = crypto_mac_init_prov(pd, mech, key, - spi_ctx_tmpl, ctxp, crq); + spi_ctx_tmpl, ctxp); if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ - if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) + if (kcf_insert_triedlist(&list, pd, KM_SLEEP) != NULL) goto retry; } @@ -254,9 +243,6 @@ retry: * Description: * Synchronously performs a part of a MAC operation. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'cr'. - * * Returns: * See comment in the beginning of the file. */ @@ -273,7 +259,7 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data) return (CRYPTO_INVALID_CONTEXT); } - int rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL); + int rv = KCF_PROV_MAC_UPDATE(pd, ctx, data); KCF_PROV_INCRSTATS(pd, rv); return (rv); } @@ -288,9 +274,6 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data) * Description: * Synchronously performs a part of a message authentication operation. * - * Context: - * Process or interrupt, according to the semantics dictated by the 'cr'. - * * Returns: * See comment in the beginning of the file. */ @@ -307,7 +290,7 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac) return (CRYPTO_INVALID_CONTEXT); } - int rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL); + int rv = KCF_PROV_MAC_FINAL(pd, ctx, mac); KCF_PROV_INCRSTATS(pd, rv); /* Release the hold done in kcf_new_ctx() during init step. */ diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c index d074bab852..4c689c20f3 100644 --- a/module/icp/core/kcf_sched.c +++ b/module/icp/core/kcf_sched.c @@ -37,19 +37,17 @@ /* kmem caches used by the scheduler */ static kmem_cache_t *kcf_context_cache; -ulong_t kcf_swprov_hndl = 0; /* * Create a new context. */ crypto_ctx_t * -kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd) +kcf_new_ctx(kcf_provider_desc_t *pd) { crypto_ctx_t *ctx; kcf_context_t *kcf_ctx; - kcf_ctx = kmem_cache_alloc(kcf_context_cache, - (crq == NULL) ? 
KM_SLEEP : KM_NOSLEEP); + kcf_ctx = kmem_cache_alloc(kcf_context_cache, KM_SLEEP); if (kcf_ctx == NULL) return (NULL); diff --git a/module/icp/include/modes/modes.h b/module/icp/include/modes/modes.h index ab71197542..aa88ea97e0 100644 --- a/module/icp/include/modes/modes.h +++ b/module/icp/include/modes/modes.h @@ -207,10 +207,6 @@ typedef struct ccm_ctx { * * gcm_len_a_len_c: 64-bit representations of the bit lengths of * AAD and ciphertext. - * - * gcm_kmflag: Current value of kmflag. Used for allocating - * the plaintext buffer during decryption and a - * gcm_avx_chunk_size'd buffer for avx enabled encryption. */ typedef struct gcm_ctx { struct common_ctx gcm_common; @@ -231,7 +227,6 @@ typedef struct gcm_ctx { uint64_t gcm_J0[2]; uint64_t gcm_len_a_len_c[2]; uint8_t *gcm_pt_buf; - int gcm_kmflag; #ifdef CAN_USE_GCM_ASM boolean_t gcm_use_avx; #endif @@ -402,7 +397,6 @@ extern void *ccm_alloc_ctx(int); extern void *gcm_alloc_ctx(int); extern void *gmac_alloc_ctx(int); extern void crypto_free_mode_ctx(void *); -extern void gcm_set_kmflag(gcm_ctx_t *, int); #ifdef __cplusplus } diff --git a/module/icp/include/sys/crypto/impl.h b/module/icp/include/sys/crypto/impl.h index 3e57233b6a..ba37c99e96 100644 --- a/module/icp/include/sys/crypto/impl.h +++ b/module/icp/include/sys/crypto/impl.h @@ -76,7 +76,7 @@ typedef struct kcf_sched_info { * other purposes, that base value is mostly same across all providers. * So, it is a good measure of the load on a provider when it is not * in a busy state. Once a provider notifies it is busy, requests - * backup in the taskq. So, we use tq_nalloc in that case which gives + * back up in the taskq. So, we use tq_nalloc in that case which gives * the number of task entries in the task queue. Note that we do not * acquire any locks here as it is not critical to get the exact number * and the lock contention may be too costly for this code path. @@ -387,76 +387,73 @@ typedef struct crypto_minor { * Wrappers for crypto_digest_ops(9S) entry points. */ -#define KCF_PROV_DIGEST_INIT(pd, ctx, mech, req) ( \ +#define KCF_PROV_DIGEST_INIT(pd, ctx, mech) ( \ (KCF_PROV_DIGEST_OPS(pd) && KCF_PROV_DIGEST_OPS(pd)->digest_init) ? \ - KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech, req) : \ + KCF_PROV_DIGEST_OPS(pd)->digest_init(ctx, mech) : \ CRYPTO_NOT_SUPPORTED) /* * Wrappers for crypto_cipher_ops(9S) entry points. */ -#define KCF_PROV_ENCRYPT_INIT(pd, ctx, mech, key, template, req) ( \ +#define KCF_PROV_ENCRYPT_INIT(pd, ctx, mech, key, template) ( \ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_init) ? \ - KCF_PROV_CIPHER_OPS(pd)->encrypt_init(ctx, mech, key, template, \ - req) : \ + KCF_PROV_CIPHER_OPS(pd)->encrypt_init(ctx, mech, key, template) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_ENCRYPT_ATOMIC(pd, session, mech, key, plaintext, ciphertext, \ - template, req) ( \ + template) ( \ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \ KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic( \ (pd)->pd_prov_handle, session, mech, key, plaintext, ciphertext, \ - template, req) : \ + template) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_DECRYPT_ATOMIC(pd, session, mech, key, ciphertext, plaintext, \ - template, req) ( \ + template) ( \ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic) ? \ KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic( \ (pd)->pd_prov_handle, session, mech, key, ciphertext, plaintext, \ - template, req) : \ + template) : \ CRYPTO_NOT_SUPPORTED) /* * Wrappers for crypto_mac_ops(9S) entry points. 
*/ -#define KCF_PROV_MAC_INIT(pd, ctx, mech, key, template, req) ( \ +#define KCF_PROV_MAC_INIT(pd, ctx, mech, key, template) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_init) ? \ - KCF_PROV_MAC_OPS(pd)->mac_init(ctx, mech, key, template, req) \ + KCF_PROV_MAC_OPS(pd)->mac_init(ctx, mech, key, template) \ : CRYPTO_NOT_SUPPORTED) /* * The _ (underscore) in _mac is needed to avoid replacing the * function mac(). */ -#define KCF_PROV_MAC_UPDATE(pd, ctx, data, req) ( \ +#define KCF_PROV_MAC_UPDATE(pd, ctx, data) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_update) ? \ - KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data, req) : \ + KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data) : \ CRYPTO_NOT_SUPPORTED) -#define KCF_PROV_MAC_FINAL(pd, ctx, mac, req) ( \ +#define KCF_PROV_MAC_FINAL(pd, ctx, mac) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_final) ? \ - KCF_PROV_MAC_OPS(pd)->mac_final(ctx, mac, req) : \ + KCF_PROV_MAC_OPS(pd)->mac_final(ctx, mac) : \ CRYPTO_NOT_SUPPORTED) -#define KCF_PROV_MAC_ATOMIC(pd, session, mech, key, data, mac, template, \ - req) ( \ +#define KCF_PROV_MAC_ATOMIC(pd, session, mech, key, data, mac, template) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_atomic) ? \ KCF_PROV_MAC_OPS(pd)->mac_atomic( \ - (pd)->pd_prov_handle, session, mech, key, data, mac, template, \ - req) : \ + (pd)->pd_prov_handle, session, mech, key, data, mac, template) : \ CRYPTO_NOT_SUPPORTED) /* * Wrappers for crypto_ctx_ops(9S) entry points. */ -#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size, req) ( \ +#define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size) ( \ (KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \ KCF_PROV_CTX_OPS(pd)->create_ctx_template( \ - (pd)->pd_prov_handle, mech, key, template, size, req) : \ + (pd)->pd_prov_handle, mech, key, template, size) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \ diff --git a/module/icp/include/sys/crypto/sched_impl.h b/module/icp/include/sys/crypto/sched_impl.h index 6174c3aa1d..1de4bd8b94 100644 --- a/module/icp/include/sys/crypto/sched_impl.h +++ b/module/icp/include/sys/crypto/sched_impl.h @@ -40,28 +40,6 @@ extern "C" { #include #include -#define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP) - -/* - * The framework keeps an internal handle to use in the adaptive - * asynchronous case. This is the case when a client has the - * CRYPTO_ALWAYS_QUEUE bit clear and a provider is used for - * the request. The request is completed in the context of the calling - * thread and kernel memory must be allocated with KM_NOSLEEP. - * - * The framework passes a pointer to the handle in crypto_req_handle_t - * argument when it calls the SPI of the provider. The macros - * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this. - * - * When a provider asks the framework for kmflag value via - * crypto_kmflag(9S) we use REQHNDL2_KMFLAG() macro. - */ -extern ulong_t kcf_swprov_hndl; -#define KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl) -#define KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl) -#define REQHNDL2_KMFLAG(rhndl) \ - ((rhndl == &kcf_swprov_hndl) ? 
KM_NOSLEEP : KM_SLEEP) - typedef struct kcf_prov_tried { kcf_provider_desc_t *pt_pd; struct kcf_prov_tried *pt_next; @@ -144,7 +122,7 @@ extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **, kcf_provider_desc_t *, int); extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t, kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t); -extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *); +extern crypto_ctx_t *kcf_new_ctx(kcf_provider_desc_t *); extern void kcf_sched_destroy(void); extern void kcf_sched_init(void); extern void kcf_free_context(kcf_context_t *); diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h index ba383a750e..6d656fef20 100644 --- a/module/icp/include/sys/crypto/spi.h +++ b/module/icp/include/sys/crypto/spi.h @@ -59,12 +59,6 @@ typedef void *crypto_provider_handle_t; */ typedef void *crypto_spi_ctx_template_t; -/* - * Request handles are used by the kernel to identify an asynchronous - * request being processed by a provider. - */ -typedef void *crypto_req_handle_t; - /* * The context structure is passed from the kernel to a provider. * It contains the information needed to process a multi-part or @@ -88,18 +82,14 @@ typedef struct crypto_ctx { * kernel using crypto_register_provider(9F). */ typedef struct crypto_digest_ops { - int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *, - crypto_req_handle_t); - int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); - int (*digest_update)(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); - int (*digest_key)(crypto_ctx_t *, crypto_key_t *, crypto_req_handle_t); - int (*digest_final)(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); + int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *); + int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); + int (*digest_update)(crypto_ctx_t *, crypto_data_t *); + int (*digest_key)(crypto_ctx_t *, crypto_key_t *); + int (*digest_final)(crypto_ctx_t *, crypto_data_t *); int (*digest_atomic)(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_data_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); } __no_const crypto_digest_ops_t; /* @@ -111,29 +101,29 @@ typedef struct crypto_digest_ops { typedef struct crypto_cipher_ops { int (*encrypt_init)(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); int (*encrypt)(crypto_ctx_t *, - crypto_data_t *, crypto_data_t *, crypto_req_handle_t); + crypto_data_t *, crypto_data_t *); int (*encrypt_update)(crypto_ctx_t *, - crypto_data_t *, crypto_data_t *, crypto_req_handle_t); + crypto_data_t *, crypto_data_t *); int (*encrypt_final)(crypto_ctx_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); int (*encrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); int (*decrypt_init)(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); int (*decrypt)(crypto_ctx_t *, - crypto_data_t *, crypto_data_t *, crypto_req_handle_t); + crypto_data_t *, crypto_data_t *); int (*decrypt_update)(crypto_ctx_t *, - crypto_data_t *, crypto_data_t *, crypto_req_handle_t); + crypto_data_t *, crypto_data_t *); int 
(*decrypt_final)(crypto_ctx_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); int (*decrypt_atomic)(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); } __no_const crypto_cipher_ops_t; /* @@ -145,21 +135,19 @@ typedef struct crypto_cipher_ops { typedef struct crypto_mac_ops { int (*mac_init)(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); int (*mac)(crypto_ctx_t *, - crypto_data_t *, crypto_data_t *, crypto_req_handle_t); + crypto_data_t *, crypto_data_t *); int (*mac_update)(crypto_ctx_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); int (*mac_final)(crypto_ctx_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); int (*mac_atomic)(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, - crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); int (*mac_verify_atomic)(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, - crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); } __no_const crypto_mac_ops_t; /* @@ -171,7 +159,7 @@ typedef struct crypto_mac_ops { typedef struct crypto_ctx_ops { int (*create_ctx_template)(crypto_provider_handle_t, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t *, size_t *, crypto_req_handle_t); + crypto_spi_ctx_template_t *, size_t *); int (*free_context)(crypto_ctx_t *); } __no_const crypto_ctx_ops_t; @@ -263,7 +251,6 @@ typedef struct crypto_provider_info { extern int crypto_register_provider(const crypto_provider_info_t *, crypto_kcf_provider_handle_t *); extern int crypto_unregister_provider(crypto_kcf_provider_handle_t); -extern int crypto_kmflag(crypto_req_handle_t); #ifdef __cplusplus diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c index ad8a15a722..034cf4d07e 100644 --- a/module/icp/io/aes.c +++ b/module/icp/io/aes.c @@ -74,33 +74,29 @@ static const crypto_mech_info_t aes_mech_info_tab[] = { }; static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *, - crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_key_t *, crypto_spi_ctx_template_t); static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *, - crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_key_t *, crypto_spi_ctx_template_t); static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *, - crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t); + crypto_key_t *, crypto_spi_ctx_template_t, boolean_t); static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *, crypto_mechanism_t *, crypto_key_t *, int, boolean_t); -static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); -static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); +static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *); +static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *); -static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); +static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *, - crypto_data_t *, 
crypto_req_handle_t); + crypto_data_t *); static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); -static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); +static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *, - crypto_data_t *, crypto_req_handle_t); + crypto_data_t *); static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_data_t *, crypto_spi_ctx_template_t); static const crypto_cipher_ops_t aes_cipher_ops = { .encrypt_init = aes_encrypt_init, @@ -117,10 +113,10 @@ static const crypto_cipher_ops_t aes_cipher_ops = { static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static const crypto_mac_ops_t aes_mac_ops = { .mac_init = NULL, @@ -133,7 +129,7 @@ static const crypto_mac_ops_t aes_mac_ops = { static int aes_create_ctx_template(crypto_provider_handle_t, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, - size_t *, crypto_req_handle_t); + size_t *); static int aes_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t aes_ctx_ops = { @@ -188,7 +184,7 @@ aes_mod_fini(void) } static int -aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag) +aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx) { void *p = NULL; boolean_t param_required = B_TRUE; @@ -230,7 +226,7 @@ aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag) rv = CRYPTO_MECHANISM_PARAM_INVALID; } if (ctx != NULL) { - p = (alloc_fun)(kmflag); + p = (alloc_fun)(KM_SLEEP); *ctx = p; } return (rv); @@ -257,18 +253,16 @@ init_keysched(crypto_key_t *key, void *newbie) static int aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_key_t *key, crypto_spi_ctx_template_t template, - crypto_req_handle_t req) + crypto_key_t *key, crypto_spi_ctx_template_t template) { - return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE)); + return (aes_common_init(ctx, mechanism, key, template, B_TRUE)); } static int aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_key_t *key, crypto_spi_ctx_template_t template, - crypto_req_handle_t req) + crypto_key_t *key, crypto_spi_ctx_template_t template) { - return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE)); + return (aes_common_init(ctx, mechanism, key, template, B_FALSE)); } @@ -279,18 +273,16 @@ aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, static int aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t template, - crypto_req_handle_t req, boolean_t is_encrypt_init) + boolean_t is_encrypt_init) { aes_ctx_t *aes_ctx; int rv; - int kmflag; - kmflag = crypto_kmflag(req); - if ((rv = 
aes_check_mech_param(mechanism, &aes_ctx, kmflag)) + if ((rv = aes_check_mech_param(mechanism, &aes_ctx)) != CRYPTO_SUCCESS) return (rv); - rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag, + rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, KM_SLEEP, is_encrypt_init); if (rv != CRYPTO_SUCCESS) { crypto_free_mode_ctx(aes_ctx); @@ -320,7 +312,7 @@ aes_copy_block64(uint8_t *in, uint64_t *out) static int aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, - crypto_data_t *ciphertext, crypto_req_handle_t req) + crypto_data_t *ciphertext) { int ret = CRYPTO_FAILED; @@ -372,7 +364,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, /* * Do an update on the specified input data. */ - ret = aes_encrypt_update(ctx, plaintext, ciphertext, req); + ret = aes_encrypt_update(ctx, plaintext, ciphertext); if (ret != CRYPTO_SUCCESS) { return (ret); } @@ -435,7 +427,7 @@ aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext, static int aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, - crypto_data_t *plaintext, crypto_req_handle_t req) + crypto_data_t *plaintext) { int ret = CRYPTO_FAILED; @@ -493,7 +485,7 @@ aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext, /* * Do an update on the specified input data. */ - ret = aes_decrypt_update(ctx, ciphertext, plaintext, req); + ret = aes_decrypt_update(ctx, ciphertext, plaintext); if (ret != CRYPTO_SUCCESS) { goto cleanup; } @@ -549,9 +541,8 @@ cleanup: static int aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext, - crypto_data_t *ciphertext, crypto_req_handle_t req) + crypto_data_t *ciphertext) { - (void) req; off_t saved_offset; size_t saved_length, out_len; int ret = CRYPTO_SUCCESS; @@ -618,7 +609,7 @@ aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext, static int aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, - crypto_data_t *plaintext, crypto_req_handle_t req) + crypto_data_t *plaintext) { off_t saved_offset; size_t saved_length, out_len; @@ -650,9 +641,6 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; - if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) - gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req)); - /* * Do the AES update on the specified input data. 
*/ @@ -696,10 +684,8 @@ aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext, } static int -aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, - crypto_req_handle_t req) +aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data) { - (void) req; aes_ctx_t *aes_ctx; int ret; @@ -753,10 +739,8 @@ aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, } static int -aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data, - crypto_req_handle_t req) +aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data) { - (void) req; aes_ctx_t *aes_ctx; int ret; off_t saved_offset; @@ -859,7 +843,7 @@ static int aes_encrypt_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext, - crypto_spi_ctx_template_t template, crypto_req_handle_t req) + crypto_spi_ctx_template_t template) { (void) provider, (void) session_id; aes_ctx_t aes_ctx; /* on the stack */ @@ -885,13 +869,13 @@ aes_encrypt_atomic(crypto_provider_handle_t provider, return (CRYPTO_DATA_LEN_RANGE); } - if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS) + if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS) return (ret); bzero(&aes_ctx, sizeof (aes_ctx_t)); ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key, - crypto_kmflag(req), B_TRUE); + KM_SLEEP, B_TRUE); if (ret != CRYPTO_SUCCESS) return (ret); @@ -995,7 +979,7 @@ static int aes_decrypt_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext, - crypto_spi_ctx_template_t template, crypto_req_handle_t req) + crypto_spi_ctx_template_t template) { (void) provider, (void) session_id; aes_ctx_t aes_ctx; /* on the stack */ @@ -1021,13 +1005,13 @@ aes_decrypt_atomic(crypto_provider_handle_t provider, return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); } - if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS) + if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS) return (ret); bzero(&aes_ctx, sizeof (aes_ctx_t)); ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key, - crypto_kmflag(req), B_FALSE); + KM_SLEEP, B_FALSE); if (ret != CRYPTO_SUCCESS) return (ret); @@ -1057,10 +1041,6 @@ aes_decrypt_atomic(crypto_provider_handle_t provider, saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; - if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE || - mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) - gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req)); - /* * Do an update on the specified input data. 
*/ @@ -1164,7 +1144,7 @@ out: static int aes_create_ctx_template(crypto_provider_handle_t provider, crypto_mechanism_t *mechanism, crypto_key_t *key, - crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req) + crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size) { (void) provider; void *keysched; @@ -1179,8 +1159,7 @@ aes_create_ctx_template(crypto_provider_handle_t provider, mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE) return (CRYPTO_MECHANISM_INVALID); - if ((keysched = aes_alloc_keysched(&size, - crypto_kmflag(req))) == NULL) { + if ((keysched = aes_alloc_keysched(&size, KM_SLEEP)) == NULL) { return (CRYPTO_HOST_MEMORY); } @@ -1342,7 +1321,7 @@ static int aes_mac_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, - crypto_spi_ctx_template_t template, crypto_req_handle_t req) + crypto_spi_ctx_template_t template) { CK_AES_GCM_PARAMS gcm_params; crypto_mechanism_t gcm_mech; @@ -1357,14 +1336,14 @@ aes_mac_atomic(crypto_provider_handle_t provider, gcm_mech.cm_param = (char *)&gcm_params; return (aes_encrypt_atomic(provider, session_id, &gcm_mech, - key, &null_crypto_data, mac, template, req)); + key, &null_crypto_data, mac, template)); } static int aes_mac_verify_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, - crypto_spi_ctx_template_t template, crypto_req_handle_t req) + crypto_spi_ctx_template_t template) { CK_AES_GCM_PARAMS gcm_params; crypto_mechanism_t gcm_mech; @@ -1379,5 +1358,5 @@ aes_mac_verify_atomic(crypto_provider_handle_t provider, gcm_mech.cm_param = (char *)&gcm_params; return (aes_decrypt_atomic(provider, session_id, &gcm_mech, - key, mac, &null_crypto_data, template, req)); + key, mac, &null_crypto_data, template)); } diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c index d5a8d5bb72..db6cc539c6 100644 --- a/module/icp/io/sha2_mod.c +++ b/module/icp/io/sha2_mod.c @@ -105,17 +105,12 @@ static const crypto_mech_info_t sha2_mech_info_tab[] = { CRYPTO_KEYSIZE_UNIT_IN_BYTES} }; -static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *, - crypto_req_handle_t); -static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); -static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); -static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); +static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *); +static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); +static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *); +static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *); static int sha2_digest_atomic(crypto_provider_handle_t, crypto_session_id_t, - crypto_mechanism_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); + crypto_mechanism_t *, crypto_data_t *, crypto_data_t *); static const crypto_digest_ops_t sha2_digest_ops = { .digest_init = sha2_digest_init, @@ -126,16 +121,15 @@ static const crypto_digest_ops_t sha2_digest_ops = { }; static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); -static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *, - crypto_req_handle_t); -static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t); + crypto_spi_ctx_template_t); +static int 
sha2_mac_update(crypto_ctx_t *, crypto_data_t *); +static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *); static int sha2_mac_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static int sha2_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static const crypto_mac_ops_t sha2_mac_ops = { .mac_init = sha2_mac_init, @@ -148,7 +142,7 @@ static const crypto_mac_ops_t sha2_mac_ops = { static int sha2_create_ctx_template(crypto_provider_handle_t, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, - size_t *, crypto_req_handle_t); + size_t *); static int sha2_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t sha2_ctx_ops = { @@ -215,15 +209,13 @@ sha2_mod_fini(void) */ static int -sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_req_handle_t req) +sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism) { /* * Allocate and initialize SHA2 context. */ - ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t), - crypto_kmflag(req)); + ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t), KM_SLEEP); if (ctx->cc_provider_private == NULL) return (CRYPTO_HOST_MEMORY); @@ -388,10 +380,8 @@ sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, } static int -sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, - crypto_req_handle_t req) +sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest) { - (void) req; int ret = CRYPTO_SUCCESS; uint_t sha_digest_len; @@ -476,10 +466,8 @@ sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, } static int -sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data, - crypto_req_handle_t req) +sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data) { - (void) req; int ret = CRYPTO_SUCCESS; ASSERT(ctx->cc_provider_private != NULL); @@ -505,10 +493,8 @@ sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data, } static int -sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest, - crypto_req_handle_t req) +sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest) { - (void) req; int ret = CRYPTO_SUCCESS; uint_t sha_digest_len; @@ -570,10 +556,9 @@ sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest, static int sha2_digest_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, - crypto_data_t *data, crypto_data_t *digest, - crypto_req_handle_t req) + crypto_data_t *data, crypto_data_t *digest) { - (void) provider, (void) session_id, (void) req; + (void) provider, (void) session_id; int ret = CRYPTO_SUCCESS; SHA2_CTX sha2_ctx; uint32_t sha_digest_len; @@ -709,8 +694,7 @@ sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes) */ static int sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, - crypto_req_handle_t req) + crypto_key_t *key, crypto_spi_ctx_template_t ctx_template) { int ret = CRYPTO_SUCCESS; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); @@ -737,8 +721,8 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, return (CRYPTO_MECHANISM_INVALID); } - ctx->cc_provider_private = kmem_alloc(sizeof (sha2_hmac_ctx_t), - 
crypto_kmflag(req)); + ctx->cc_provider_private = + kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP); if (ctx->cc_provider_private == NULL) return (CRYPTO_HOST_MEMORY); @@ -792,10 +776,8 @@ sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, } static int -sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, - crypto_req_handle_t req) +sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data) { - (void) req; int ret = CRYPTO_SUCCESS; ASSERT(ctx->cc_provider_private != NULL); @@ -822,9 +804,8 @@ sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, } static int -sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req) +sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac) { - (void) req; int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; uint32_t digest_len, sha_digest_len; @@ -938,9 +919,9 @@ static int sha2_mac_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, - crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) + crypto_spi_ctx_template_t ctx_template) { - (void) provider, (void) session_id, (void) req; + (void) provider, (void) session_id; int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t sha2_hmac_ctx; @@ -1072,9 +1053,9 @@ static int sha2_mac_verify_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, - crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) + crypto_spi_ctx_template_t ctx_template) { - (void) provider, (void) session_id, (void) req; + (void) provider, (void) session_id; int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t sha2_hmac_ctx; @@ -1247,8 +1228,7 @@ bail: static int sha2_create_ctx_template(crypto_provider_handle_t provider, crypto_mechanism_t *mechanism, crypto_key_t *key, - crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size, - crypto_req_handle_t req) + crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size) { (void) provider; sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl; @@ -1279,8 +1259,7 @@ sha2_create_ctx_template(crypto_provider_handle_t provider, /* * Allocate and initialize SHA2 context. 
*/ - sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), - crypto_kmflag(req)); + sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP); if (sha2_hmac_ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c index 48e4358b8d..43d9c9db11 100644 --- a/module/icp/io/skein_mod.c +++ b/module/icp/io/skein_mod.c @@ -51,15 +51,12 @@ static const crypto_mech_info_t skein_mech_info_tab[] = { CRYPTO_KEYSIZE_UNIT_IN_BYTES} }; -static int skein_digest_init(crypto_ctx_t *, crypto_mechanism_t *, - crypto_req_handle_t); -static int skein_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); -static int skein_update(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t); -static int skein_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t); +static int skein_digest_init(crypto_ctx_t *, crypto_mechanism_t *); +static int skein_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); +static int skein_update(crypto_ctx_t *, crypto_data_t *); +static int skein_final(crypto_ctx_t *, crypto_data_t *); static int skein_digest_atomic(crypto_provider_handle_t, crypto_session_id_t, - crypto_mechanism_t *, crypto_data_t *, crypto_data_t *, - crypto_req_handle_t); + crypto_mechanism_t *, crypto_data_t *, crypto_data_t *); static const crypto_digest_ops_t skein_digest_ops = { .digest_init = skein_digest_init, @@ -70,10 +67,10 @@ static const crypto_digest_ops_t skein_digest_ops = { }; static int skein_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static int skein_mac_atomic(crypto_provider_handle_t, crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, - crypto_spi_ctx_template_t, crypto_req_handle_t); + crypto_spi_ctx_template_t); static const crypto_mac_ops_t skein_mac_ops = { .mac_init = skein_mac_init, @@ -86,7 +83,7 @@ static const crypto_mac_ops_t skein_mac_ops = { static int skein_create_ctx_template(crypto_provider_handle_t, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, - size_t *, crypto_req_handle_t); + size_t *); static int skein_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t skein_ctx_ops = { @@ -264,8 +261,7 @@ skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) * Performs a Final on a context and writes to a uio digest output. */ static int -skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, - crypto_req_handle_t req) +skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest) { off_t offset = digest->cd_offset; uint_t vec_idx = 0; @@ -298,7 +294,7 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, size_t cur_len; digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES( - ctx->sc_digest_bitlen), crypto_kmflag(req)); + ctx->sc_digest_bitlen), KM_SLEEP); if (digest_tmp == NULL) return (CRYPTO_HOST_MEMORY); SKEIN_OP(ctx, Final, digest_tmp); @@ -342,16 +338,14 @@ skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest, * for Skein-1024). 
*/ static int -skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_req_handle_t req) +skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism) { int error = CRYPTO_SUCCESS; if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type)) return (CRYPTO_MECHANISM_INVALID); - SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), - crypto_kmflag(req)); + SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP); if (SKEIN_CTX(ctx) == NULL) return (CRYPTO_HOST_MEMORY); @@ -376,8 +370,7 @@ errout: * see what to pass here. */ static int -skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, - crypto_req_handle_t req) +skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest) { int error = CRYPTO_SUCCESS; @@ -390,7 +383,7 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, return (CRYPTO_BUFFER_TOO_SMALL); } - error = skein_update(ctx, data, req); + error = skein_update(ctx, data); if (error != CRYPTO_SUCCESS) { bzero(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); @@ -398,7 +391,7 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, digest->cd_length = 0; return (error); } - error = skein_final(ctx, digest, req); + error = skein_final(ctx, digest); return (error); } @@ -409,9 +402,8 @@ skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest, * Supported input data formats are raw, uio and mblk. */ static int -skein_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req) +skein_update(crypto_ctx_t *ctx, crypto_data_t *data) { - (void) req; int error = CRYPTO_SUCCESS; ASSERT(SKEIN_CTX(ctx) != NULL); @@ -438,7 +430,7 @@ skein_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req) * Supported output digest formats are raw, uio and mblk. 
*/ static int -skein_final(crypto_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req) +skein_final(crypto_ctx_t *ctx, crypto_data_t *digest) { int error = CRYPTO_SUCCESS; @@ -457,7 +449,7 @@ skein_final(crypto_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req) (uint8_t *)digest->cd_raw.iov_base + digest->cd_offset); break; case CRYPTO_DATA_UIO: - error = skein_digest_final_uio(SKEIN_CTX(ctx), digest, req); + error = skein_digest_final_uio(SKEIN_CTX(ctx), digest); break; default: error = CRYPTO_ARGUMENTS_BAD; @@ -485,9 +477,9 @@ skein_final(crypto_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req) static int skein_digest_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, - crypto_data_t *data, crypto_data_t *digest, crypto_req_handle_t req) + crypto_data_t *data, crypto_data_t *digest) { - (void) provider, (void) session_id, (void) req; + (void) provider, (void) session_id; int error; skein_ctx_t skein_ctx; crypto_ctx_t ctx; @@ -502,9 +494,9 @@ skein_digest_atomic(crypto_provider_handle_t provider, goto out; SKEIN_OP(&skein_ctx, Init, skein_ctx.sc_digest_bitlen); - if ((error = skein_update(&ctx, data, digest)) != CRYPTO_SUCCESS) + if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS) goto out; - if ((error = skein_final(&ctx, data, digest)) != CRYPTO_SUCCESS) + if ((error = skein_final(&ctx, data)) != CRYPTO_SUCCESS) goto out; out: @@ -553,13 +545,11 @@ skein_mac_ctx_build(skein_ctx_t *ctx, crypto_mechanism_t *mechanism, */ static int skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, - crypto_key_t *key, crypto_spi_ctx_template_t ctx_template, - crypto_req_handle_t req) + crypto_key_t *key, crypto_spi_ctx_template_t ctx_template) { int error; - SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), - crypto_kmflag(req)); + SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP); if (SKEIN_CTX(ctx) == NULL) return (CRYPTO_HOST_MEMORY); @@ -592,7 +582,7 @@ static int skein_mac_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, - crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) + crypto_spi_ctx_template_t ctx_template) { /* faux crypto context just for skein_digest_{update,final} */ (void) provider, (void) session_id; @@ -609,9 +599,9 @@ skein_mac_atomic(crypto_provider_handle_t provider, goto errout; } - if ((error = skein_update(&ctx, data, req)) != CRYPTO_SUCCESS) + if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS) goto errout; - if ((error = skein_final(&ctx, mac, req)) != CRYPTO_SUCCESS) + if ((error = skein_final(&ctx, mac)) != CRYPTO_SUCCESS) goto errout; return (CRYPTO_SUCCESS); @@ -632,14 +622,13 @@ errout: static int skein_create_ctx_template(crypto_provider_handle_t provider, crypto_mechanism_t *mechanism, crypto_key_t *key, - crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size, - crypto_req_handle_t req) + crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size) { (void) provider; int error; skein_ctx_t *ctx_tmpl; - ctx_tmpl = kmem_alloc(sizeof (*ctx_tmpl), crypto_kmflag(req)); + ctx_tmpl = kmem_alloc(sizeof (*ctx_tmpl), KM_SLEEP); if (ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); error = skein_mac_ctx_build(ctx_tmpl, mechanism, key); diff --git a/module/icp/spi/kcf_spi.c b/module/icp/spi/kcf_spi.c index de95855143..bfcb353e53 100644 --- a/module/icp/spi/kcf_spi.c +++ b/module/icp/spi/kcf_spi.c @@ 
-220,18 +220,6 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle) return (CRYPTO_SUCCESS); } -/* - * This routine is used by providers to determine - * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation. - * - * This routine can be called from user or interrupt context. - */ -int -crypto_kmflag(crypto_req_handle_t handle) -{ - return (REQHNDL2_KMFLAG(handle)); -} - /* * Process the mechanism info structures specified by the provider * during registration. A NULL crypto_provider_info_t indicates diff --git a/module/os/linux/zfs/zio_crypt.c b/module/os/linux/zfs/zio_crypt.c index 31126a78bc..099d23484d 100644 --- a/module/os/linux/zfs/zio_crypt.c +++ b/module/os/linux/zfs/zio_crypt.c @@ -269,13 +269,13 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key) */ mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname); ret = crypto_create_ctx_template(&mech, &key->zk_current_key, - &key->zk_current_tmpl, KM_SLEEP); + &key->zk_current_tmpl); if (ret != CRYPTO_SUCCESS) key->zk_current_tmpl = NULL; mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key, - &key->zk_hmac_tmpl, KM_SLEEP); + &key->zk_hmac_tmpl); if (ret != CRYPTO_SUCCESS) key->zk_hmac_tmpl = NULL; @@ -323,7 +323,7 @@ zio_crypt_key_change_salt(zio_crypt_key_t *key) /* destroy the old context template and create the new one */ crypto_destroy_ctx_template(key->zk_current_tmpl); ret = crypto_create_ctx_template(&mech, &key->zk_current_key, - &key->zk_current_tmpl, KM_SLEEP); + &key->zk_current_tmpl); if (ret != CRYPTO_SUCCESS) key->zk_current_tmpl = NULL; @@ -447,15 +447,13 @@ zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key, /* perform the actual encryption */ if (encrypt) { - ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata, - NULL); + ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata); if (ret != CRYPTO_SUCCESS) { ret = SET_ERROR(EIO); goto error; } } else { - ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata, - NULL); + ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata); if (ret != CRYPTO_SUCCESS) { ASSERT3U(ret, ==, CRYPTO_INVALID_MAC); ret = SET_ERROR(ECKSUM); @@ -619,13 +617,13 @@ zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, */ mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname); ret = crypto_create_ctx_template(&mech, &key->zk_current_key, - &key->zk_current_tmpl, KM_SLEEP); + &key->zk_current_tmpl); if (ret != CRYPTO_SUCCESS) key->zk_current_tmpl = NULL; mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key, - &key->zk_hmac_tmpl, KM_SLEEP); + &key->zk_hmac_tmpl); if (ret != CRYPTO_SUCCESS) key->zk_hmac_tmpl = NULL; @@ -689,7 +687,7 @@ zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, /* generate the hmac */ ret = crypto_mac(&mech, &in_data, &key->zk_hmac_key, key->zk_hmac_tmpl, - &digest_data, NULL); + &digest_data); if (ret != CRYPTO_SUCCESS) { ret = SET_ERROR(EIO); goto error; @@ -1139,7 +1137,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, cd.cd_offset = 0; /* calculate the portable MAC from the portable fields and metadnode */ - ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx, NULL); + ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx); if (ret != CRYPTO_SUCCESS) { ret = SET_ERROR(EIO); goto error; @@ -1226,7 +1224,7 @@ zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t 
datalen, } /* calculate the local MAC from the userused and groupused dnodes */ - ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx, NULL); + ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx); if (ret != CRYPTO_SUCCESS) { ret = SET_ERROR(EIO); goto error; diff --git a/module/zfs/hkdf.c b/module/zfs/hkdf.c index 9017727689..2c91401d5b 100644 --- a/module/zfs/hkdf.c +++ b/module/zfs/hkdf.c @@ -52,7 +52,7 @@ hkdf_sha512_extract(uint8_t *salt, uint_t salt_len, uint8_t *key_material, output_cd.cd_raw.iov_base = (char *)out_buf; output_cd.cd_raw.iov_len = output_cd.cd_length; - ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd, NULL); + ret = crypto_mac(&mech, &input_cd, &key, NULL, &output_cd); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); @@ -108,7 +108,7 @@ hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len, T_cd.cd_length = T_len; T_cd.cd_raw.iov_len = T_cd.cd_length; - ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL); + ret = crypto_mac_init(&mech, &key, NULL, &ctx); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO));