module: icp: remove other provider types

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #12901
наб 2021-12-23 19:51:00 +01:00 committed by Brian Behlendorf
parent 167ced3fb1
commit 710657f51d
21 changed files with 236 additions and 1422 deletions

View File

@@ -163,8 +163,6 @@ extern taskq_t *taskq_of_curthread(void);
((void) sizeof (dc), \
taskq_create(name, nthreads, maxclsyspri, min, max, flags))
extern boolean_t taskq_empty(taskq_t *);
int spl_taskq_init(void);
void spl_taskq_fini(void);

View File

@@ -168,7 +168,6 @@ typedef enum {
/* The event_arg argument structure for CRYPTO_EVENT_PROVIDERS_CHANGE event */
typedef struct crypto_notify_event_change {
crypto_mech_name_t ec_mech_name;
crypto_provider_type_t ec_provider_type;
crypto_event_change_t ec_change;
} crypto_notify_event_change_t;

View File

@@ -369,12 +369,6 @@ typedef struct crypto_key32 {
/* Providers */
typedef enum {
CRYPTO_HW_PROVIDER = 0,
CRYPTO_SW_PROVIDER,
CRYPTO_LOGICAL_PROVIDER
} crypto_provider_type_t;
typedef uint32_t crypto_provider_id_t;
#define KCF_PROVID_INVALID ((uint32_t)-1)
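With CRYPTO_HW_PROVIDER and CRYPTO_LOGICAL_PROVIDER gone, every remaining descriptor is a software provider, so the type tag and every branch on it become dead code. A minimal self-contained sketch of the dispatch pattern this removes (hypothetical model types, not the real kcf_provider_desc_t):

#include <stdio.h>

/* Model of the pre-change type tag and the routing it forced. */
typedef enum {
    CRYPTO_HW_PROVIDER = 0,
    CRYPTO_SW_PROVIDER,
    CRYPTO_LOGICAL_PROVIDER
} provider_type_model_t;

typedef struct {
    provider_type_model_t pd_prov_type;
} prov_desc_model_t;

static const char *
route(const prov_desc_model_t *pd)
{
    /* Entry points used to resolve logical providers to a member first. */
    if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
        return ("resolve to least-loaded member, then submit");
    return ("submit directly");
}

int
main(void)
{
    prov_desc_model_t pd = { CRYPTO_SW_PROVIDER };
    (void) printf("%s\n", route(&pd)); /* always "submit directly" now */
    return (0);
}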

View File

@@ -509,7 +509,6 @@ extern taskq_t *taskq_of_curthread(void);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern void system_taskq_init(void);
extern void system_taskq_fini(void);
extern boolean_t taskq_empty(taskq_t *);
#define XVA_MAPSIZE 3
#define XVA_MAGIC 0x78766174

View File

@@ -363,12 +363,6 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
return (ENOENT);
}
boolean_t
taskq_empty(taskq_t *tq)
{
return (tq->tq_task.tqent_next == &tq->tq_task || tq->tq_active == 0);
}
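The userspace taskq_empty() deleted above reads as: the sentinel-headed circular list is empty when the head links back to itself, or nothing is currently active. A standalone model of that test (simplified stand-in types, not the real taskq_t):

#include <stdio.h>

typedef struct tqent {
    struct tqent *tqent_next;
    struct tqent *tqent_prev;
} tqent_model_t;

typedef struct {
    tqent_model_t tq_task; /* sentinel head of the circular list */
    int tq_active;         /* entries currently being executed */
} taskq_model_t;

static int
taskq_model_empty(taskq_model_t *tq)
{
    /* Same shape as the check above: empty list or no active entries. */
    return (tq->tq_task.tqent_next == &tq->tq_task || tq->tq_active == 0);
}

int
main(void)
{
    taskq_model_t tq = { { &tq.tq_task, &tq.tq_task }, 0 };
    (void) printf("empty: %d\n", taskq_model_empty(&tq)); /* prints 1 */
    return (0);
}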
void
system_taskq_init(void)
{

View File

@@ -110,27 +110,9 @@ crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
if (func == CRYPTO_FG_ENCRYPT) {
error = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_ENCRYPT);
} else {
error = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_DECRYPT);
}
if (error != CRYPTO_SUCCESS)
return (error);
}
/* Allocate and initialize the canonical context */
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL)
return (CRYPTO_HOST_MEMORY);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
@@ -153,42 +135,6 @@ crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
goto done;
}
/* Check if context sharing is possible */
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
key->ck_format == CRYPTO_KEY_RAW &&
KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) {
kcf_context_t *tctxp = (kcf_context_t *)ctx;
kcf_provider_desc_t *tpd = NULL;
const crypto_mech_info_t *sinfo;
if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech,
B_FALSE) == CRYPTO_SUCCESS)) {
int tlen;
sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type));
/*
* key->ck_length from the consumer is always in bits.
* We convert it to be in the same unit registered by
* the provider in order to do a comparison.
*/
if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)
tlen = key->ck_length >> 3;
else
tlen = key->ck_length;
/*
* Check if the software provider can support context
* sharing and support this key length.
*/
if ((sinfo->cm_mech_flags & CRYPTO_CAN_SHARE_OPSTATE) &&
(tlen >= sinfo->cm_min_key_length) &&
(tlen <= sinfo->cm_max_key_length)) {
ctx->cc_flags = CRYPTO_INIT_OPSTATE;
tctxp->kc_sw_prov_desc = tpd;
} else
KCF_PROV_REFRELE(tpd);
}
}
if (func == CRYPTO_FG_ENCRYPT) {
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_INIT, sid,
mech, key, NULL, NULL, tmpl);
@@ -200,9 +146,6 @@ crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
error = kcf_submit_request(real_provider, ctx, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
done:
if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
@@ -234,7 +177,7 @@ crypto_cipher_init(crypto_mechanism_t *mech, crypto_key_t *key,
retry:
/* pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, func, CHECK_RESTRICT(crq), 0)) == NULL) {
list, func, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
@@ -247,8 +190,7 @@ retry:
* freeing this tmpl and create a new one for the key and new SW
* provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
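The ct_generation comparison in this and the later retry hunks guards against cached context templates built for a provider that has since been replaced. A runnable model of the staleness check (hypothetical simplified types):

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t me_gen_swprov; } mech_entry_model_t;
typedef struct { uint32_t ct_generation; } ctx_template_model_t;

/* A template is stale when its recorded generation no longer matches
 * the mechanism entry's current provider generation. */
static int
template_is_stale(const ctx_template_model_t *ct, const mech_entry_model_t *me)
{
    return (ct->ct_generation != me->me_gen_swprov);
}

int
main(void)
{
    mech_entry_model_t me = { 7 };
    ctx_template_model_t ct = { 7 };

    (void) printf("stale: %d\n", template_is_stale(&ct, &me)); /* 0 */
    me.me_gen_swprov++; /* a new provider registers for the mechanism */
    (void) printf("stale: %d\n", template_is_stale(&ct, &me)); /* 1 */
    return (0);
}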
@@ -321,21 +263,10 @@ crypto_encrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
error = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_ENCRYPT_ATOMIC);
if (error != CRYPTO_SUCCESS)
return (error);
}
KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
plaintext, ciphertext, tmpl);
error = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
return (error);
}
@@ -360,22 +291,19 @@ crypto_encrypt(crypto_mechanism_t *mech, crypto_data_t *plaintext,
retry:
/* pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, CRYPTO_FG_ENCRYPT_ATOMIC, CHECK_RESTRICT(crq),
plaintext->cd_length)) == NULL) {
list, CRYPTO_FG_ENCRYPT_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
/*
* For SW providers, check the validity of the context template
* Check the validity of the context template
* It is very rare that the generation number mis-matches, so
* it is acceptable to fail here, and let the consumer recover by
* freeing this tmpl and create a new one for the key and new SW
* provider
* freeing this tmpl and creating a new one for the key and new provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
@@ -480,8 +408,6 @@ crypto_encrypt_update(crypto_context_t context, crypto_data_t *plaintext,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, plaintext,
@@ -540,8 +466,6 @@ crypto_encrypt_final(crypto_context_t context, crypto_data_t *ciphertext,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, NULL);
@@ -599,27 +523,13 @@ crypto_decrypt_prov(crypto_provider_t provider, crypto_session_id_t sid,
kcf_req_params_t params;
kcf_provider_desc_t *pd = provider;
kcf_provider_desc_t *real_provider = pd;
int rv;
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
rv = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_DECRYPT_ATOMIC);
if (rv != CRYPTO_SUCCESS)
return (rv);
}
KCF_WRAP_DECRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
ciphertext, plaintext, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
return (rv);
return (kcf_submit_request(real_provider, NULL, crq, &params));
}
/*
@@ -643,22 +553,19 @@ crypto_decrypt(crypto_mechanism_t *mech, crypto_data_t *ciphertext,
retry:
/* pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, CRYPTO_FG_DECRYPT_ATOMIC, CHECK_RESTRICT(crq),
ciphertext->cd_length)) == NULL) {
list, CRYPTO_FG_DECRYPT_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
/*
* For SW providers, check the validity of the context template
* Check the validity of the context template
* It is very rare that the generation number mis-matches, so
* it is acceptable to fail here, and let the consumer recover by
* freeing this tmpl and create a new one for the key and new SW
* provider
* freeing this tmpl and creating a new one for the key and new provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
@@ -763,8 +670,6 @@ crypto_decrypt_update(crypto_context_t context, crypto_data_t *ciphertext,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DECRYPT_UPDATE(pd, ctx, ciphertext,
@@ -823,8 +728,6 @@ crypto_decrypt_final(crypto_context_t context, crypto_data_t *plaintext,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DECRYPT_FINAL(pd, ctx, plaintext,

View File

@@ -93,27 +93,14 @@ crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid,
kcf_req_params_t params;
kcf_provider_desc_t *pd = provider;
kcf_provider_desc_t *real_provider = pd;
int rv;
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
rv = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq),
pd, &real_provider, CRYPTO_FG_DIGEST_ATOMIC);
if (rv != CRYPTO_SUCCESS)
return (rv);
}
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, NULL,
data, digest);
/* no crypto context to carry between multiple parts. */
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
return (rv);
return (kcf_submit_request(real_provider, NULL, crq, &params));
}
@@ -133,8 +120,7 @@ crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
retry:
/* The pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
data->cd_length)) == NULL) {
CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
@@ -150,17 +136,11 @@ retry:
digest, KCF_SWFP_RHNDL(crq));
KCF_PROV_INCRSTATS(pd, error);
} else {
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
(pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
(data->cd_length > pd->pd_hash_limit)) {
error = CRYPTO_BUFFER_TOO_BIG;
} else {
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC,
pd->pd_sid, mech, NULL, data, digest);
KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC,
pd->pd_sid, mech, NULL, data, digest);
/* no crypto context to carry between multiple parts. */
error = kcf_submit_request(pd, NULL, crq, &params);
}
/* no crypto context to carry between multiple parts. */
error = kcf_submit_request(pd, NULL, crq, &params);
}
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
@@ -212,21 +192,9 @@ crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
error = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_DIGEST);
if (error != CRYPTO_SUCCESS)
return (error);
}
/* Allocate and initialize the canonical context */
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL)
return (CRYPTO_HOST_MEMORY);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
@@ -243,9 +211,6 @@ crypto_digest_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
error = kcf_submit_request(real_provider, ctx, crq, &params);
}
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
if ((error == CRYPTO_SUCCESS) || (error == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
@@ -272,27 +237,14 @@ crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
retry:
/* The pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error,
list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0)) == NULL) {
list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
(pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
/*
* The hardware provider has limited digest support.
* So, we fallback early here to using a software provider.
*
* XXX - need to enhance to do the fallback later in
* crypto_digest_update() if the size of accumulated input data
* exceeds the maximum size digestable by hardware provider.
*/
error = CRYPTO_BUFFER_TOO_BIG;
} else {
error = crypto_digest_init_prov(pd, pd->pd_sid,
mech, ctxp, crq);
}
error = crypto_digest_init_prov(pd, pd->pd_sid,
mech, ctxp, crq);
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
IS_RECOVERABLE(error)) {
@@ -341,8 +293,6 @@ crypto_digest_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DIGEST_UPDATE(pd, ctx, data, NULL);
@@ -390,8 +340,6 @@ crypto_digest_final(crypto_context_t context, crypto_data_t *digest,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
error = KCF_PROV_DIGEST_FINAL(pd, ctx, digest, NULL);

View File

@@ -98,20 +98,9 @@ crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid,
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
rv = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_MAC_ATOMIC);
if (rv != CRYPTO_SUCCESS)
return (rv);
}
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
data, mac, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
return (rv);
}
@@ -136,22 +125,19 @@ crypto_mac(crypto_mechanism_t *mech, crypto_data_t *data,
retry:
/* The pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
data->cd_length)) == NULL) {
list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
/*
* For SW providers, check the validity of the context template
* Check the validity of the context template
* It is very rare that the generation number mis-matches, so
* it is acceptable to fail here, and let the consumer recover by
* freeing this tmpl and create a new one for the key and new SW
* provider
* freeing this tmpl and creating a new one for the key and new provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
@@ -173,22 +159,10 @@ retry:
mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
KCF_PROV_INCRSTATS(pd, error);
} else {
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
(pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
(data->cd_length > pd->pd_hash_limit)) {
/*
* XXX - We need a check to see if this is indeed
* a HMAC. So far, all kernel clients use
* this interface only for HMAC. So, this is fine
* for now.
*/
error = CRYPTO_BUFFER_TOO_BIG;
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params);
}
error = kcf_submit_request(pd, NULL, crq, &params);
}
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
@@ -218,26 +192,13 @@ crypto_mac_verify_prov(crypto_provider_t provider, crypto_session_id_t sid,
kcf_req_params_t params;
kcf_provider_desc_t *pd = provider;
kcf_provider_desc_t *real_provider = pd;
int rv;
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
rv = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_MAC_ATOMIC);
if (rv != CRYPTO_SUCCESS)
return (rv);
}
KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_MAC_VERIFY_ATOMIC, sid, mech,
key, data, mac, tmpl);
rv = kcf_submit_request(real_provider, NULL, crq, &params);
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
return (rv);
return (kcf_submit_request(real_provider, NULL, crq, &params));
}
/*
@@ -260,22 +221,19 @@ crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
retry:
/* The pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
data->cd_length)) == NULL) {
list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
/*
* For SW providers, check the validity of the context template
* Check the validity of the context template
* It is very rare that the generation number mis-matches, so
* it is acceptable to fail here, and let the consumer recover by
* freeing this tmpl and create a new one for the key and new SW
* provider
* freeing this tmpl and creating a new one for the key and new provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
@@ -297,18 +255,11 @@ retry:
data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
KCF_PROV_INCRSTATS(pd, error);
} else {
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
(pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
(data->cd_length > pd->pd_hash_limit)) {
/* see comments in crypto_mac() */
error = CRYPTO_BUFFER_TOO_BIG;
} else {
KCF_WRAP_MAC_OPS_PARAMS(&params,
KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
key, data, mac, spi_ctx_tmpl);
KCF_WRAP_MAC_OPS_PARAMS(&params,
KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
key, data, mac, spi_ctx_tmpl);
error = kcf_submit_request(pd, NULL, crq, &params);
}
error = kcf_submit_request(pd, NULL, crq, &params);
}
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
@@ -374,21 +325,9 @@ crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
ASSERT(KCF_PROV_REFHELD(pd));
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
rv = kcf_get_hardware_provider(mech->cm_type,
CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
&real_provider, CRYPTO_FG_MAC);
if (rv != CRYPTO_SUCCESS)
return (rv);
}
/* Allocate and initialize the canonical context */
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL)
return (CRYPTO_HOST_MEMORY);
}
/* The fast path for SW providers. */
if (CHECK_FASTPATH(crq, pd)) {
@@ -405,9 +344,6 @@ crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
rv = kcf_submit_request(real_provider, ctx, crq, &params);
}
if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
KCF_PROV_REFRELE(real_provider);
if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
*ctxp = (crypto_context_t)ctx;
else {
@@ -438,22 +374,20 @@ crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
retry:
/* The pd is returned held */
if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) {
list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq))) == NULL) {
if (list != NULL)
kcf_free_triedlist(list);
return (error);
}
/*
* For SW providers, check the validity of the context template
* Check the validity of the context template
* It is very rare that the generation number mis-matches, so
* it is acceptable to fail here, and let the consumer recover by
* freeing this tmpl and create a new one for the key and new SW
* provider
* freeing this tmpl and creating a new one for the key and new provider
*/
if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
if (list != NULL)
kcf_free_triedlist(list);
@@ -464,21 +398,8 @@ retry:
}
}
if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
(pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
/*
* The hardware provider has limited HMAC support.
* So, we fallback early here to using a software provider.
*
* XXX - need to enhance to do the fallback later in
* crypto_mac_update() if the size of accumulated input data
* exceeds the maximum size digestable by hardware provider.
*/
error = CRYPTO_BUFFER_TOO_BIG;
} else {
error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
spi_ctx_tmpl, ctxp, crq);
}
error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
spi_ctx_tmpl, ctxp, crq);
if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
IS_RECOVERABLE(error)) {
/* Add pd to the linked list of providers tried. */
@@ -527,8 +448,6 @@ crypto_mac_update(crypto_context_t context, crypto_data_t *data,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
rv = KCF_PROV_MAC_UPDATE(pd, ctx, data, NULL);
@@ -576,8 +495,6 @@ crypto_mac_final(crypto_context_t context, crypto_data_t *mac,
return (CRYPTO_INVALID_CONTEXT);
}
ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* The fast path for SW providers. */
if (CHECK_FASTPATH(cr, pd)) {
rv = KCF_PROV_MAC_FINAL(pd, ctx, mac, NULL);

View File

@@ -50,8 +50,8 @@ static kcf_ntfy_elem_t *ntfy_list_head;
* Description:
* Walks the mechanisms tables, looking for an entry that matches the
* mechname. Once it finds it, it builds the 64-bit mech_type and returns
* it. If there are no hardware or software providers for the mechanism,
* but there is an unloaded software provider, this routine will attempt
* it. If there are no providers for the mechanism,
* but there is an unloaded provider, this routine will attempt
* to load it.
*
* Context:

View File

@@ -68,168 +68,6 @@ is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
return (B_FALSE);
}
/*
* Search a mech entry's hardware provider list for the specified
* provider. Return true if found.
*/
static boolean_t
is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me,
crypto_func_group_t fg)
{
kcf_prov_mech_desc_t *prov_chain;
prov_chain = me->me_hw_prov_chain;
if (prov_chain != NULL) {
ASSERT(me->me_num_hwprov > 0);
for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
if (prov_chain->pm_prov_desc == pd &&
IS_FG_SUPPORTED(prov_chain, fg)) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* This routine, given a logical provider, returns the least loaded
* provider belonging to the logical provider. The provider must be
* able to do the specified mechanism, i.e. check that the mechanism
* hasn't been disabled. In addition, just in case providers are not
* entirely equivalent, the provider's entry point is checked for
* non-nullness. This is accomplished by having the caller pass, as
* arguments, the offset of the function group (offset_1), and the
* offset of the function within the function group (offset_2).
* Returns NULL if no provider can be found.
*/
int
kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
crypto_mech_type_t mech_type_2, boolean_t call_restrict,
kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg)
{
kcf_provider_desc_t *provider, *real_pd = old;
kcf_provider_desc_t *gpd = NULL; /* good provider */
kcf_provider_desc_t *bpd = NULL; /* busy provider */
kcf_provider_list_t *p;
kcf_ops_class_t class;
kcf_mech_entry_t *me;
const kcf_mech_entry_tab_t *me_tab;
int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
/* get the mech entry for the specified mechanism */
class = KCF_MECH2CLASS(mech_type_1);
if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
return (CRYPTO_MECHANISM_INVALID);
}
me_tab = &kcf_mech_tabs_tab[class];
index = KCF_MECH2INDEX(mech_type_1);
if ((index < 0) || (index >= me_tab->met_size)) {
return (CRYPTO_MECHANISM_INVALID);
}
me = &((me_tab->met_tab)[index]);
mutex_enter(&me->me_mutex);
/*
* We assume the provider descriptor will not go away because
* it is being held somewhere, i.e. its reference count has been
* incremented. In the case of the crypto module, the provider
* descriptor is held by the session structure.
*/
if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
if (old->pd_provider_list == NULL) {
real_pd = NULL;
rv = CRYPTO_DEVICE_ERROR;
goto out;
}
/*
* Find the least loaded real provider. KCF_PROV_LOAD gives
* the load (number of pending requests) of the provider.
*/
mutex_enter(&old->pd_lock);
p = old->pd_provider_list;
while (p != NULL) {
provider = p->pl_provider;
ASSERT(provider->pd_prov_type !=
CRYPTO_LOGICAL_PROVIDER);
if (call_restrict &&
(provider->pd_flags & KCF_PROV_RESTRICTED)) {
p = p->pl_next;
continue;
}
if (!is_valid_provider_for_mech(provider, me, fg)) {
p = p->pl_next;
continue;
}
/* provider does second mech */
if (mech_type_2 != CRYPTO_MECH_INVALID) {
int i;
i = KCF_TO_PROV_MECH_INDX(provider,
mech_type_2);
if (i == KCF_INVALID_INDX) {
p = p->pl_next;
continue;
}
}
if (provider->pd_state != KCF_PROV_READY) {
/* choose BUSY if no READY providers */
if (provider->pd_state == KCF_PROV_BUSY)
bpd = provider;
p = p->pl_next;
continue;
}
len = KCF_PROV_LOAD(provider);
if (len < gqlen) {
gqlen = len;
gpd = provider;
}
p = p->pl_next;
}
if (gpd != NULL) {
real_pd = gpd;
KCF_PROV_REFHOLD(real_pd);
} else if (bpd != NULL) {
real_pd = bpd;
KCF_PROV_REFHOLD(real_pd);
} else {
/* can't find provider */
real_pd = NULL;
rv = CRYPTO_MECHANISM_INVALID;
}
mutex_exit(&old->pd_lock);
} else {
if (!KCF_IS_PROV_USABLE(old) ||
(call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) {
real_pd = NULL;
rv = CRYPTO_DEVICE_ERROR;
goto out;
}
if (!is_valid_provider_for_mech(old, me, fg)) {
real_pd = NULL;
rv = CRYPTO_MECHANISM_INVALID;
goto out;
}
KCF_PROV_REFHOLD(real_pd);
}
out:
mutex_exit(&me->me_mutex);
*new = real_pd;
return (rv);
}
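The core of the routine deleted above was a least-loaded scan: walk the logical provider's member list and keep the entry with the minimum KCF_PROV_LOAD. A standalone model of that scan (assumed simplified types):

#include <limits.h>
#include <stdio.h>

typedef struct prov {
    struct prov *pl_next;
    int load; /* stands in for KCF_PROV_LOAD(provider) */
} prov_model_t;

static prov_model_t *
least_loaded(prov_model_t *head)
{
    prov_model_t *gpd = NULL;
    int gqlen = INT_MAX;

    for (prov_model_t *p = head; p != NULL; p = p->pl_next) {
        if (p->load < gqlen) {
            gqlen = p->load;
            gpd = p;
        }
    }
    return (gpd); /* NULL if the list was empty */
}

int
main(void)
{
    prov_model_t c = { NULL, 5 }, b = { &c, 2 }, a = { &b, 9 };
    (void) printf("best load: %d\n", least_loaded(&a)->load); /* 2 */
    return (0);
}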
/*
* Return the best provider for the specified mechanism. The provider
* is held and it is the caller's responsibility to release it when done.
@@ -247,11 +85,10 @@ out:
kcf_provider_desc_t *
kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
boolean_t call_restrict, size_t data_size)
boolean_t call_restrict)
{
kcf_provider_desc_t *pd = NULL, *gpd = NULL;
kcf_prov_mech_desc_t *prov_chain, *mdesc;
int len, gqlen = INT_MAX;
kcf_provider_desc_t *pd = NULL;
kcf_prov_mech_desc_t *mdesc;
kcf_ops_class_t class;
int index;
kcf_mech_entry_t *me;
@@ -276,50 +113,7 @@ kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
mutex_enter(&me->me_mutex);
prov_chain = me->me_hw_prov_chain;
/*
* We check for the threshold for using a hardware provider for
* this amount of data. If there is no software provider available
* for the mechanism, then the threshold is ignored.
*/
if ((prov_chain != NULL) &&
((data_size == 0) || (me->me_threshold == 0) ||
(data_size >= me->me_threshold) ||
((mdesc = me->me_sw_prov) == NULL) ||
(!IS_FG_SUPPORTED(mdesc, fg)) ||
(!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
ASSERT(me->me_num_hwprov > 0);
/* there is at least one provider */
/*
* Find the least loaded real provider. KCF_PROV_LOAD gives
* the load (number of pending requests) of the provider.
*/
while (prov_chain != NULL) {
pd = prov_chain->pm_prov_desc;
if (!IS_FG_SUPPORTED(prov_chain, fg) ||
!KCF_IS_PROV_USABLE(pd) ||
IS_PROVIDER_TRIED(pd, triedl) ||
(call_restrict &&
(pd->pd_flags & KCF_PROV_RESTRICTED))) {
prov_chain = prov_chain->pm_next;
continue;
}
if ((len = KCF_PROV_LOAD(pd)) < gqlen) {
gqlen = len;
gpd = pd;
}
prov_chain = prov_chain->pm_next;
}
pd = gpd;
}
/* No HW provider for this mech, is there a SW provider? */
/* Is there a provider? */
if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
pd = mdesc->pm_prov_desc;
if (!IS_FG_SUPPORTED(mdesc, fg) ||

View File

@@ -369,8 +369,6 @@ kcf_add_mech_provider(short mech_indx,
crypto_mech_type_t kcf_mech_type;
kcf_prov_mech_desc_t *prov_mech;
ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
mech_info = &prov_desc->pd_mechanisms[mech_indx];
/*
@@ -425,50 +423,34 @@
* Add new kcf_prov_mech_desc at the front of HW providers
* chain.
*/
switch (prov_desc->pd_prov_type) {
mutex_enter(&mech_entry->me_mutex);
if (mech_entry->me_sw_prov != NULL) {
/*
* There is already a provider for this mechanism.
* Since we allow only one provider per mechanism,
* report this condition.
*/
cmn_err(CE_WARN, "The cryptographic provider "
"\"%s\" will not be used for %s. The provider "
"\"%s\" will be used for this mechanism "
"instead.", prov_desc->pd_description,
mech_info->cm_mech_name,
mech_entry->me_sw_prov->pm_prov_desc->
pd_description);
KCF_PROV_REFRELE(prov_desc);
kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
prov_mech = NULL;
} else {
/*
* Set the provider as the provider for
* this mechanism.
*/
mech_entry->me_sw_prov = prov_mech;
case CRYPTO_HW_PROVIDER:
mutex_enter(&mech_entry->me_mutex);
prov_mech->pm_me = mech_entry;
prov_mech->pm_next = mech_entry->me_hw_prov_chain;
mech_entry->me_hw_prov_chain = prov_mech;
mech_entry->me_num_hwprov++;
mutex_exit(&mech_entry->me_mutex);
break;
case CRYPTO_SW_PROVIDER:
mutex_enter(&mech_entry->me_mutex);
if (mech_entry->me_sw_prov != NULL) {
/*
* There is already a SW provider for this mechanism.
* Since we allow only one SW provider per mechanism,
* report this condition.
*/
cmn_err(CE_WARN, "The cryptographic software provider "
"\"%s\" will not be used for %s. The provider "
"\"%s\" will be used for this mechanism "
"instead.", prov_desc->pd_description,
mech_info->cm_mech_name,
mech_entry->me_sw_prov->pm_prov_desc->
pd_description);
KCF_PROV_REFRELE(prov_desc);
kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
prov_mech = NULL;
} else {
/*
* Set the provider as the software provider for
* this mechanism.
*/
mech_entry->me_sw_prov = prov_mech;
/* We'll wrap around after 4 billion registrations! */
mech_entry->me_gen_swprov = kcf_gen_swprov++;
}
mutex_exit(&mech_entry->me_mutex);
break;
default:
break;
/* We'll wrap around after 4 billion registrations! */
mech_entry->me_gen_swprov = kcf_gen_swprov++;
}
mutex_exit(&mech_entry->me_mutex);
*pmdpp = prov_mech;
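The rewritten block above enforces the new invariant: at most one provider per mechanism, with the generation counter bumped on every successful registration. A compact model (hypothetical types; the message stands in for the cmn_err warning):

#include <stdio.h>

typedef struct { const char *pd_description; } prov_model_t;
typedef struct {
    prov_model_t *me_sw_prov;
    unsigned me_gen_swprov;
} mech_entry_model_t;

static int
register_provider(mech_entry_model_t *me, prov_model_t *pd)
{
    if (me->me_sw_prov != NULL) {
        (void) fprintf(stderr, "\"%s\" not used; \"%s\" already serves "
            "this mechanism\n", pd->pd_description,
            me->me_sw_prov->pd_description);
        return (-1);
    }
    me->me_sw_prov = pd;
    me->me_gen_swprov++; /* cached templates must now be rebuilt */
    return (0);
}

int
main(void)
{
    mech_entry_model_t me = { NULL, 0 };
    prov_model_t a = { "provider A" }, b = { "provider B" };

    (void) register_provider(&me, &a); /* succeeds */
    (void) register_provider(&me, &b); /* warns and is rejected */
    return (0);
}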
@@ -494,12 +476,8 @@ void
kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
{
crypto_mech_type_t mech_type;
kcf_prov_mech_desc_t *prov_mech = NULL, *prov_chain;
kcf_prov_mech_desc_t **prev_entry_next;
kcf_prov_mech_desc_t *prov_mech = NULL;
kcf_mech_entry_t *mech_entry;
crypto_mech_info_list_t *mil, *mil2, *next, **prev_next;
ASSERT(prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
/* get the KCF mech type that was assigned to the mechanism */
if ((mech_type = kcf_mech_hash_find(mech_name)) ==
@@ -521,88 +499,16 @@ kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
}
mutex_enter(&mech_entry->me_mutex);
switch (prov_desc->pd_prov_type) {
case CRYPTO_HW_PROVIDER:
/* find the provider in the mech_entry chain */
prev_entry_next = &mech_entry->me_hw_prov_chain;
prov_mech = mech_entry->me_hw_prov_chain;
while (prov_mech != NULL &&
prov_mech->pm_prov_desc != prov_desc) {
prev_entry_next = &prov_mech->pm_next;
prov_mech = prov_mech->pm_next;
}
if (prov_mech == NULL) {
/* entry not found, simply return */
mutex_exit(&mech_entry->me_mutex);
return;
}
/* remove provider entry from mech_entry chain */
*prev_entry_next = prov_mech->pm_next;
ASSERT(mech_entry->me_num_hwprov > 0);
mech_entry->me_num_hwprov--;
break;
case CRYPTO_SW_PROVIDER:
if (mech_entry->me_sw_prov == NULL ||
mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
/* not the software provider for this mechanism */
mutex_exit(&mech_entry->me_mutex);
return;
}
prov_mech = mech_entry->me_sw_prov;
mech_entry->me_sw_prov = NULL;
break;
default:
/* unexpected crypto_provider_type_t */
if (mech_entry->me_sw_prov == NULL ||
mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
/* not the provider for this mechanism */
mutex_exit(&mech_entry->me_mutex);
return;
}
prov_mech = mech_entry->me_sw_prov;
mech_entry->me_sw_prov = NULL;
mutex_exit(&mech_entry->me_mutex);
/* Free the dual ops cross-reference lists */
mil = prov_mech->pm_mi_list;
while (mil != NULL) {
next = mil->ml_next;
if (kcf_get_mech_entry(mil->ml_kcf_mechid,
&mech_entry) != KCF_SUCCESS) {
mil = next;
continue;
}
mutex_enter(&mech_entry->me_mutex);
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
prov_chain = mech_entry->me_hw_prov_chain;
else
prov_chain = mech_entry->me_sw_prov;
while (prov_chain != NULL) {
if (prov_chain->pm_prov_desc == prov_desc) {
prev_next = &prov_chain->pm_mi_list;
mil2 = prov_chain->pm_mi_list;
while (mil2 != NULL &&
mil2->ml_kcf_mechid != mech_type) {
prev_next = &mil2->ml_next;
mil2 = mil2->ml_next;
}
if (mil2 != NULL) {
*prev_next = mil2->ml_next;
kmem_free(mil2, sizeof (*mil2));
}
break;
}
prov_chain = prov_chain->pm_next;
}
mutex_exit(&mech_entry->me_mutex);
kmem_free(mil, sizeof (crypto_mech_info_list_t));
mil = next;
}
/* free entry */
KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
@@ -656,8 +562,8 @@ kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
/*
* Lookup the hash table for an entry that matches the mechname.
* If there are no hardware or software providers for the mechanism,
* but there is an unloaded software provider, this routine will attempt
* If there are no providers for the mechanism,
* but there is an unloaded provider, this routine will attempt
* to load it.
*/
crypto_mech_type_t

View File

@@ -201,7 +201,7 @@ kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
* since it is invoked from user context during provider registration.
*/
kcf_provider_desc_t *
kcf_alloc_provider_desc(const crypto_provider_info_t *info)
kcf_alloc_provider_desc(void)
{
kcf_provider_desc_t *desc =
kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
@@ -223,7 +223,7 @@
/*
* Called by KCF_PROV_REFRELE when a provider's reference count drops
* to zero. We free the descriptor when the last reference is released.
* However, for software providers, we do not free it when there is an
* However, for providers, we do not free it when there is an
* unregister thread waiting. We signal that thread in this case and
* that thread is responsible for freeing the descriptor.
*/
@@ -231,22 +231,16 @@ void
kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
{
mutex_enter(&desc->pd_lock);
switch (desc->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
if (desc->pd_state == KCF_PROV_REMOVED ||
desc->pd_state == KCF_PROV_DISABLED) {
desc->pd_state = KCF_PROV_FREED;
cv_broadcast(&desc->pd_remove_cv);
mutex_exit(&desc->pd_lock);
break;
}
zfs_fallthrough;
case CRYPTO_HW_PROVIDER:
case CRYPTO_LOGICAL_PROVIDER:
if (desc->pd_state == KCF_PROV_REMOVED ||
desc->pd_state == KCF_PROV_DISABLED) {
desc->pd_state = KCF_PROV_FREED;
cv_broadcast(&desc->pd_remove_cv);
mutex_exit(&desc->pd_lock);
kcf_free_provider_desc(desc);
return;
}
mutex_exit(&desc->pd_lock);
kcf_free_provider_desc(desc);
}
/*
@@ -269,9 +263,6 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
/* free the kernel memory associated with the provider descriptor */
if (desc->pd_sched_info.ks_taskq != NULL)
taskq_destroy(desc->pd_sched_info.ks_taskq);
mutex_destroy(&desc->pd_lock);
cv_destroy(&desc->pd_resume_cv);
cv_destroy(&desc->pd_remove_cv);
@@ -281,7 +272,7 @@ kcf_free_provider_desc(kcf_provider_desc_t *desc)
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.
* for the provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
* responsibility to release it when done. The mechanism entry
* is returned if the optional argument mep is non NULL.
@@ -300,16 +291,16 @@ kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
return (CRYPTO_MECHANISM_INVALID);
/*
* Get the software provider for this mechanism.
* Get the provider for this mechanism.
* Lock the mech_entry until we grab the 'pd'.
*/
mutex_enter(&me->me_mutex);
if (me->me_sw_prov == NULL ||
(*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
/* no SW provider for this mechanism */
/* no provider for this mechanism */
if (log_warn)
cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
cmn_err(CE_WARN, "no provider for \"%s\"\n",
me->me_name);
mutex_exit(&me->me_mutex);
return (CRYPTO_MECH_NOT_SUPPORTED);
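Throughout these files the descriptor is returned "held": the lookup takes a reference that the caller must release. A minimal single-threaded model of that contract (simplified stand-ins for KCF_PROV_REFHOLD/KCF_PROV_REFRELE):

#include <stdio.h>

typedef struct { unsigned pd_refcnt; } prov_model_t;
typedef struct { prov_model_t *me_sw_prov; } mech_entry_model_t;

static prov_model_t *
get_prov_held(mech_entry_model_t *me)
{
    prov_model_t *pd = me->me_sw_prov;

    if (pd == NULL)
        return (NULL); /* no provider for this mechanism */
    pd->pd_refcnt++;   /* hold: caller must release when done */
    return (pd);
}

int
main(void)
{
    prov_model_t p = { 1 };
    mech_entry_model_t me = { &p };
    prov_model_t *pd = get_prov_held(&me);

    (void) printf("refcnt: %u\n", pd->pd_refcnt); /* 2 */
    pd->pd_refcnt--; /* release */
    return (0);
}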

View File

@@ -35,7 +35,7 @@
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
static kcf_global_swq_t *gswq; /* Global software queue */
static kcf_global_swq_t *gswq; /* Global queue */
/* Thread pool related variables */
static kcf_pool_t *kcfpool; /* Thread pool of kcfd LWPs */
@@ -58,16 +58,13 @@ static kcf_stats_t kcf_ksdata = {
{ "max threads in pool", KSTAT_DATA_UINT32},
{ "requests in gswq", KSTAT_DATA_UINT32},
{ "max requests in gswq", KSTAT_DATA_UINT32},
{ "threads for HW taskq", KSTAT_DATA_UINT32},
{ "minalloc for HW taskq", KSTAT_DATA_UINT32},
{ "maxalloc for HW taskq", KSTAT_DATA_UINT32}
{ "maxalloc for gwsq", KSTAT_DATA_UINT32}
};
static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static int kcf_enqueue(kcf_areq_node_t *);
static void kcfpool_alloc(void);
static void kcf_reqid_delete(kcf_areq_node_t *areq);
@@ -224,118 +221,6 @@ kcf_disp_sw_request(kcf_areq_node_t *areq)
return (CRYPTO_QUEUED);
}
/*
* This routine is called by the taskq associated with
* each hardware provider. We notify the kernel consumer
* via the callback routine in case of CRYPTO_SUCCESS or
* a failure.
*
* A request can be of type kcf_areq_node_t or of type
* kcf_sreq_node_t.
*/
static void
process_req_hwp(void *ireq)
{
int error = 0;
crypto_ctx_t *ctx;
kcf_call_type_t ctype;
kcf_provider_desc_t *pd;
kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
sreq->sn_provider : areq->an_provider;
/*
* Wait if flow control is in effect for the provider. A
* CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
* notification will signal us. We also get signaled if
* the provider is unregistering.
*/
if (pd->pd_state == KCF_PROV_BUSY) {
mutex_enter(&pd->pd_lock);
while (pd->pd_state == KCF_PROV_BUSY)
cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
mutex_exit(&pd->pd_lock);
}
/*
* Bump the internal reference count while the request is being
* processed. This is how we know when it's safe to unregister
* a provider. This step must precede the pd_state check below.
*/
KCF_PROV_IREFHOLD(pd);
/*
* Fail the request if the provider has failed. We return a
* recoverable error and the notified clients attempt any
* recovery. For async clients this is done in kcf_aop_done()
* and for sync clients it is done in the k-api routines.
*/
if (pd->pd_state >= KCF_PROV_FAILED) {
error = CRYPTO_DEVICE_ERROR;
goto bail;
}
if (ctype == CRYPTO_SYNCH) {
mutex_enter(&sreq->sn_lock);
sreq->sn_state = REQ_INPROGRESS;
mutex_exit(&sreq->sn_lock);
ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
error = common_submit_request(sreq->sn_provider, ctx,
sreq->sn_params, sreq);
} else {
kcf_context_t *ictx;
ASSERT(ctype == CRYPTO_ASYNCH);
/*
* We are in the per-hardware provider thread context and
* hence can sleep. Note that the caller would have done
* a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
*/
ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
mutex_enter(&areq->an_lock);
/*
* We need to maintain ordering for multi-part requests.
* an_is_my_turn is set to B_TRUE initially for a request
* when it is enqueued and there are no other requests
* for that context. It is set later from kcf_aop_done() when
* the request before us in the chain of requests for the
* context completes. We get signaled at that point.
*/
if (ictx != NULL) {
ASSERT(ictx->kc_prov_desc == areq->an_provider);
while (areq->an_is_my_turn == B_FALSE) {
cv_wait(&areq->an_turn_cv, &areq->an_lock);
}
}
areq->an_state = REQ_INPROGRESS;
mutex_exit(&areq->an_lock);
error = common_submit_request(areq->an_provider, ctx,
&areq->an_params, areq);
}
bail:
if (error == CRYPTO_QUEUED) {
/*
* The request is queued by the provider and we should
* get a crypto_op_notification() from the provider later.
* We notify the consumer at that time.
*/
return;
} else { /* CRYPTO_SUCCESS or other failure */
KCF_PROV_IREFRELE(pd);
if (ctype == CRYPTO_SYNCH)
kcf_sop_done(sreq, error);
else
kcf_aop_done(areq, error);
}
}
/*
* This routine checks if a request can be retried on another
* provider. If true, mech1 is initialized to point to the mechanism
@@ -441,7 +326,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
areq->an_tried_plist, fg,
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
(areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED));
if (new_pd == NULL)
return (error);
@@ -472,26 +357,7 @@ kcf_resubmit_request(kcf_areq_node_t *areq)
areq->an_state = REQ_WAITING;
mutex_exit(&areq->an_lock);
switch (new_pd->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
error = kcf_disp_sw_request(areq);
break;
case CRYPTO_HW_PROVIDER: {
taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
TASKQID_INVALID) {
error = CRYPTO_HOST_MEMORY;
} else {
error = CRYPTO_QUEUED;
}
break;
default:
break;
}
}
error = kcf_disp_sw_request(areq);
return (error);
}
@@ -515,196 +381,58 @@ kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
{
int error = CRYPTO_SUCCESS;
kcf_areq_node_t *areq;
kcf_sreq_node_t *sreq;
kcf_context_t *kcf_ctx;
taskq_t *taskq = pd->pd_sched_info.ks_taskq;
kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
/* Synchronous cases */
/* Synchronous */
if (crq == NULL) {
switch (pd->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
error = common_submit_request(pd, ctx, params,
KCF_RHNDL(KM_SLEEP));
} else { /* Asynchronous */
if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
/*
* This case has less overhead since there is
* no switching of context.
*/
error = common_submit_request(pd, ctx, params,
KCF_RHNDL(KM_SLEEP));
break;
case CRYPTO_HW_PROVIDER:
KCF_RHNDL(KM_NOSLEEP));
} else {
/*
* Special case for CRYPTO_SYNCHRONOUS providers that
* never return a CRYPTO_QUEUED error. We skip any
* request allocation and call the SPI directly.
* CRYPTO_ALWAYS_QUEUE is set. We need to
* queue the request and return.
*/
if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
taskq_empty(taskq)) {
KCF_PROV_IREFHOLD(pd);
if (pd->pd_state == KCF_PROV_READY) {
error = common_submit_request(pd, ctx,
params, KCF_RHNDL(KM_SLEEP));
KCF_PROV_IREFRELE(pd);
ASSERT(error != CRYPTO_QUEUED);
break;
}
KCF_PROV_IREFRELE(pd);
}
sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
sreq->sn_state = REQ_ALLOCATED;
sreq->sn_rv = CRYPTO_FAILED;
sreq->sn_params = params;
/*
* Note that we do not need to hold the context
* for synchronous case as the context will never
* become invalid underneath us. We do not need to hold
* the provider here either as the caller has a hold.
*/
sreq->sn_context = kcf_ctx;
ASSERT(KCF_PROV_REFHELD(pd));
sreq->sn_provider = pd;
ASSERT(taskq != NULL);
/*
* Call the SPI directly if the taskq is empty and the
* provider is not busy, else dispatch to the taskq.
* Calling directly is fine as this is the synchronous
* case. This is unlike the asynchronous case where we
* must always dispatch to the taskq.
*/
if (taskq_empty(taskq) &&
pd->pd_state == KCF_PROV_READY) {
process_req_hwp(sreq);
} else {
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
params);
if (areq == NULL)
error = CRYPTO_HOST_MEMORY;
else {
if (!(crq->cr_flag
& CRYPTO_SKIP_REQID)) {
/*
* We can not tell from taskq_dispatch() return
* value if we exceeded maxalloc. Hence the
* check here. Since we are allowed to wait in
* the synchronous case, we wait for the taskq
* to become empty.
* Set the request handle. We have to
* do this before dispatching the
* request.
*/
if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
taskq_wait(taskq);
crq->cr_reqid = kcf_reqid_insert(areq);
}
(void) taskq_dispatch(taskq, process_req_hwp,
sreq, TQ_SLEEP);
}
/*
* Wait for the notification to arrive,
* if the operation is not done yet.
* Bug# 4722589 will make the wait a cv_wait_sig().
*/
mutex_enter(&sreq->sn_lock);
while (sreq->sn_state < REQ_DONE)
cv_wait(&sreq->sn_cv, &sreq->sn_lock);
mutex_exit(&sreq->sn_lock);
error = sreq->sn_rv;
kmem_cache_free(kcf_sreq_cache, sreq);
break;
default:
error = CRYPTO_FAILED;
break;
}
} else { /* Asynchronous cases */
switch (pd->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
error = kcf_disp_sw_request(areq);
/*
* This case has less overhead since there is
* no switching of context.
* There is an error processing this
* request. Remove the handle and
* release the request structure.
*/
error = common_submit_request(pd, ctx, params,
KCF_RHNDL(KM_NOSLEEP));
} else {
/*
* CRYPTO_ALWAYS_QUEUE is set. We need to
* queue the request and return.
*/
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
params);
if (areq == NULL)
error = CRYPTO_HOST_MEMORY;
else {
if (error != CRYPTO_QUEUED) {
if (!(crq->cr_flag
& CRYPTO_SKIP_REQID)) {
/*
* Set the request handle. We have to
* do this before dispatching the
* request.
*/
crq->cr_reqid = kcf_reqid_insert(areq);
}
error = kcf_disp_sw_request(areq);
/*
* There is an error processing this
* request. Remove the handle and
* release the request structure.
*/
if (error != CRYPTO_QUEUED) {
if (!(crq->cr_flag
& CRYPTO_SKIP_REQID))
kcf_reqid_delete(areq);
KCF_AREQ_REFRELE(areq);
}
& CRYPTO_SKIP_REQID))
kcf_reqid_delete(areq);
KCF_AREQ_REFRELE(areq);
}
}
break;
case CRYPTO_HW_PROVIDER:
/*
* We need to queue the request and return.
*/
areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params);
if (areq == NULL) {
error = CRYPTO_HOST_MEMORY;
goto done;
}
ASSERT(taskq != NULL);
/*
* We can not tell from taskq_dispatch() return
* value if we exceeded maxalloc. Hence the check
* here.
*/
if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
error = CRYPTO_BUSY;
KCF_AREQ_REFRELE(areq);
goto done;
}
if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
/*
* Set the request handle. We have to do this
* before dispatching the request.
*/
crq->cr_reqid = kcf_reqid_insert(areq);
}
if (taskq_dispatch(taskq,
process_req_hwp, areq, TQ_NOSLEEP) ==
TASKQID_INVALID) {
error = CRYPTO_HOST_MEMORY;
if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
kcf_reqid_delete(areq);
KCF_AREQ_REFRELE(areq);
} else {
error = CRYPTO_QUEUED;
}
break;
default:
error = CRYPTO_FAILED;
break;
}
}
done:
return (error);
}
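After this commit kcf_submit_request() reduces to three routes: a synchronous direct SPI call that may sleep, an adaptive-asynchronous direct call with KM_NOSLEEP, and the global queue for ALWAYS_QUEUE requests. A sketch of that decision tree (model only, not the real signatures):

#include <stdio.h>

typedef enum {
    SUBMIT_DIRECT_SLEEP,   /* crq == NULL: synchronous, may sleep */
    SUBMIT_DIRECT_NOSLEEP, /* adaptive async: direct call, KM_NOSLEEP */
    SUBMIT_QUEUE           /* ALWAYS_QUEUE: enqueue on gswq, assign reqid */
} submit_route_t;

static submit_route_t
submit_route(int have_callback_req, int always_queue)
{
    if (!have_callback_req)
        return (SUBMIT_DIRECT_SLEEP);
    if (!always_queue)
        return (SUBMIT_DIRECT_NOSLEEP);
    return (SUBMIT_QUEUE);
}

int
main(void)
{
    (void) printf("%d %d %d\n", submit_route(0, 0),
        submit_route(1, 0), submit_route(1, 1)); /* 0 1 2 */
    return (0);
}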
@@ -750,7 +478,7 @@ kcf_free_context(kcf_context_t *kcf_ctx)
/* kcf_ctx->kc_prov_desc has a hold on pd */
KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
/* check if this context is shared with a software provider */
/* check if this context is shared with a provider */
if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
kcf_ctx->kc_sw_prov_desc != NULL) {
KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
@@ -775,7 +503,7 @@ kcf_free_req(kcf_areq_node_t *areq)
}
/*
* Add the request node to the end of the global software queue.
* Add the request node to the end of the global queue.
*
* The caller should not hold the queue lock. Returns 0 if the
* request is successfully queued. Returns CRYPTO_BUSY if the limit
@@ -969,7 +697,7 @@ kcf_sched_init(void)
mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
gswq->gs_njobs = 0;
gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
gswq->gs_maxjobs = kcf_maxthreads * CRYPTO_TASKQ_MAX;
gswq->gs_first = gswq->gs_last = NULL;
/* Initialize the global reqid table */
@@ -1216,9 +944,7 @@ kcf_misc_kstat_update(kstat_t *ksp, int rw)
ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
ks_data->ks_swq_maxalloc.value.ui32 = CRYPTO_TASKQ_MAX;
return (0);
}

View File

@@ -62,9 +62,7 @@ typedef struct kcf_stats {
kstat_named_t ks_maxthrs;
kstat_named_t ks_swq_njobs;
kstat_named_t ks_swq_maxjobs;
kstat_named_t ks_taskq_threads;
kstat_named_t ks_taskq_minalloc;
kstat_named_t ks_taskq_maxalloc;
kstat_named_t ks_swq_maxalloc;
} kcf_stats_t;
/*
@@ -80,9 +78,6 @@ typedef struct kcf_sched_info {
/* The number of operations that returned CRYPTO_BUSY. */
uint64_t ks_nbusy_rval;
/* taskq used to dispatch crypto requests */
taskq_t *ks_taskq;
} kcf_sched_info_t;
/*
@@ -96,8 +91,7 @@ typedef struct kcf_sched_info {
* acquire any locks here as it is not critical to get the exact number
* and the lock contention may be too costly for this code path.
*/
#define KCF_PROV_LOAD(pd) ((pd)->pd_state != KCF_PROV_BUSY ? \
(pd)->pd_irefcnt : (pd)->pd_sched_info.ks_taskq->tq_nalloc)
#define KCF_PROV_LOAD(pd) ((pd)->pd_irefcnt)
#define KCF_PROV_INCRSTATS(pd, error) { \
(pd)->pd_sched_info.ks_ndispatches++; \
@@ -125,21 +119,17 @@ typedef struct kcf_sched_info {
* the elements is important.
*
* Routines which get a provider or the list of providers
* should pick only those that are either in KCF_PROV_READY state
* or in KCF_PROV_BUSY state.
* should pick only those that are in KCF_PROV_READY state.
*/
typedef enum {
KCF_PROV_ALLOCATED = 1,
KCF_PROV_UNVERIFIED,
KCF_PROV_VERIFICATION_FAILED,
/*
* state < KCF_PROV_READY means the provider can not
* be used at all.
*/
KCF_PROV_READY,
KCF_PROV_BUSY,
/*
* state > KCF_PROV_BUSY means the provider can not
* state > KCF_PROV_READY means the provider can not
* be used for new requests.
*/
KCF_PROV_FAILED,
@@ -152,30 +142,23 @@ typedef enum {
KCF_PROV_FREED
} kcf_prov_state_t;
#define KCF_IS_PROV_UNVERIFIED(pd) ((pd)->pd_state == KCF_PROV_UNVERIFIED)
#define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY || \
(pd)->pd_state == KCF_PROV_BUSY)
#define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY)
#define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_REMOVED)
/* Internal flags valid for pd_flags field */
#define KCF_PROV_RESTRICTED 0x40000000
#define KCF_LPROV_MEMBER 0x80000000 /* is member of a logical provider */
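With KCF_PROV_BUSY gone, usability collapses to a single-state test. An abridged runnable model of the new rule (the real enum carries more states than shown):

#include <stdio.h>

typedef enum {
    KCF_PROV_ALLOCATED = 1,
    KCF_PROV_READY,
    KCF_PROV_FAILED,  /* > READY: no new requests accepted */
    KCF_PROV_REMOVED,
    KCF_PROV_FREED
} prov_state_model_t;

#define PROV_USABLE(s) ((s) == KCF_PROV_READY)

int
main(void)
{
    (void) printf("%d %d\n", PROV_USABLE(KCF_PROV_READY),
        PROV_USABLE(KCF_PROV_FAILED)); /* 1 0 */
    return (0);
}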
/*
* A provider descriptor structure. There is one such structure per
* provider. It is allocated and initialized at registration time and
* freed when the provider unregisters.
*
* pd_prov_type: Provider type, hardware or software
* pd_sid: Session ID of the provider used by kernel clients.
* This is valid only for session-oriented providers.
* pd_refcnt: Reference counter to this provider descriptor
* pd_irefcnt: References held by the framework internal structs
* pd_lock: lock protects pd_state and pd_provider_list
* pd_lock: lock protects pd_state
* pd_state: State value of the provider
* pd_provider_list: Used to cross-reference logical providers and their
* members. Not used for software providers.
* pd_resume_cv: cv to wait for state to change from KCF_PROV_BUSY
* pd_prov_handle: Provider handle specified by provider
* pd_ops_vector: The ops vector specified by Provider
* pd_mech_indx: Lookup table which maps a core framework mechanism
@@ -185,10 +168,6 @@ typedef enum {
* pd_sched_info: Scheduling information associated with the provider
* pd_mech_list_count: The number of entries in pi_mechanisms, specified
* by the provider during registration
* pd_name: Device name or module name
* pd_instance: Device instance
* pd_module_id: Module ID returned by modload
* pd_mctlp: Pointer to modctl structure for this provider
* pd_remove_cv: cv to wait on while the provider queue drains
* pd_description: Provider description string
* pd_flags bitwise OR of pi_flags from crypto_provider_info_t
@@ -201,13 +180,11 @@ typedef enum {
* pd_ks_data: kstat data
*/
typedef struct kcf_provider_desc {
crypto_provider_type_t pd_prov_type;
crypto_session_id_t pd_sid;
uint_t pd_refcnt;
uint_t pd_irefcnt;
kmutex_t pd_lock;
kcf_prov_state_t pd_state;
struct kcf_provider_list *pd_provider_list;
kcondvar_t pd_resume_cv;
crypto_provider_handle_t pd_prov_handle;
const crypto_ops_t *pd_ops_vector;
@@ -216,10 +193,6 @@ typedef struct kcf_provider_desc {
const crypto_mech_info_t *pd_mechanisms;
kcf_sched_info_t pd_sched_info;
uint_t pd_mech_list_count;
// char *pd_name;
// uint_t pd_instance;
// int pd_module_id;
// struct modctl *pd_mctlp;
kcondvar_t pd_remove_cv;
const char *pd_description;
uint_t pd_flags;
@@ -230,12 +203,6 @@ typedef struct kcf_provider_desc {
kcf_prov_stats_t pd_ks_data;
} kcf_provider_desc_t;
/* useful for making a list of providers */
typedef struct kcf_provider_list {
struct kcf_provider_list *pl_next;
struct kcf_provider_desc *pl_provider;
} kcf_provider_list_t;
/* atomic operations in linux implicitly form a memory barrier */
#define membar_exit()
@@ -273,14 +240,6 @@ typedef struct kcf_provider_list {
}
/* list of crypto_mech_info_t valid as the second mech in a dual operation */
typedef struct crypto_mech_info_list {
struct crypto_mech_info_list *ml_next;
crypto_mech_type_t ml_kcf_mechid; /* KCF's id */
crypto_mech_info_t ml_mech_info;
} crypto_mech_info_list_t;
/*
* An element in a mechanism provider descriptors chain.
* The kcf_prov_mech_desc_t is duplicated in every chain the provider belongs
@@ -292,15 +251,9 @@ typedef struct kcf_prov_mech_desc {
struct kcf_mech_entry *pm_me; /* Back to the head */
struct kcf_prov_mech_desc *pm_next; /* Next in the chain */
crypto_mech_info_t pm_mech_info; /* Provider mech info */
crypto_mech_info_list_t *pm_mi_list; /* list for duals */
kcf_provider_desc_t *pm_prov_desc; /* Common desc. */
} kcf_prov_mech_desc_t;
/* and the notation shortcuts ... */
#define pm_provider_type pm_prov_desc.pd_provider_type
#define pm_provider_handle pm_prov_desc.pd_provider_handle
#define pm_ops_vector pm_prov_desc.pd_ops_vector
/*
* A mechanism entry in an xxx_mech_tab[]. me_pad was deemed
* to be unnecessary and removed.
@@ -309,16 +262,10 @@ typedef struct kcf_mech_entry {
crypto_mech_name_t me_name; /* mechanism name */
crypto_mech_type_t me_mechid; /* Internal id for mechanism */
kmutex_t me_mutex; /* access protection */
kcf_prov_mech_desc_t *me_hw_prov_chain; /* list of HW providers */
kcf_prov_mech_desc_t *me_sw_prov; /* SW provider */
kcf_prov_mech_desc_t *me_sw_prov; /* provider */
/*
* Number of HW providers in the chain. There is only one
* SW provider. So, we need only a count of HW providers.
*/
int me_num_hwprov;
/*
* When a SW provider is present, this is the generation number that
* ensures no objects from old SW providers are used in the new one
* When a provider is present, this is the generation number that
* ensures no objects from old providers are used in the new one
*/
uint32_t me_gen_swprov;
/*
@@ -327,28 +274,6 @@ typedef struct kcf_mech_entry {
size_t me_threshold;
} kcf_mech_entry_t;
/*
* A policy descriptor structure. It is allocated and initialized
* when administrative ioctls load disabled mechanisms.
*
* pd_prov_type: Provider type, hardware or software
* pd_name: Device name or module name.
* pd_instance: Device instance.
* pd_refcnt: Reference counter for this policy descriptor
* pd_mutex: Protects array and count of disabled mechanisms.
* pd_disabled_count: Count of disabled mechanisms.
* pd_disabled_mechs: Array of disabled mechanisms.
*/
typedef struct kcf_policy_desc {
crypto_provider_type_t pd_prov_type;
char *pd_name;
uint_t pd_instance;
uint_t pd_refcnt;
kmutex_t pd_mutex;
uint_t pd_disabled_count;
crypto_mech_name_t *pd_disabled_mechs;
} kcf_policy_desc_t;
/*
* If a component has a reference to a kcf_policy_desc_t,
* it REFHOLD()s. A new policy descriptor which is referenced only
@@ -370,21 +295,6 @@ typedef struct kcf_policy_desc {
kcf_policy_free_desc(desc); \
}
/*
* This entry stores the name of a software module and its
* mechanisms. The mechanisms are 'hints' that are used to
* trigger loading of the module.
*/
typedef struct kcf_soft_conf_entry {
struct kcf_soft_conf_entry *ce_next;
char *ce_name;
crypto_mech_name_t *ce_mechs;
uint_t ce_count;
} kcf_soft_conf_entry_t;
extern kmutex_t soft_config_mutex;
extern kcf_soft_conf_entry_t *soft_config_list;
/*
* Global tables. The sizes are from the predefined PKCS#11 v2.20 mechanisms,
* with a margin of a few extra empty entry points
@ -671,8 +581,7 @@ extern int kcf_add_mech_provider(short, kcf_provider_desc_t *,
kcf_prov_mech_desc_t **);
extern void kcf_remove_mech_provider(const char *, kcf_provider_desc_t *);
extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **);
extern kcf_provider_desc_t *kcf_alloc_provider_desc(
const crypto_provider_info_t *);
extern kcf_provider_desc_t *kcf_alloc_provider_desc(void);
extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *);
extern void kcf_free_provider_desc(kcf_provider_desc_t *);
extern crypto_mech_type_t crypto_mech2id_common(const char *, boolean_t);

View File

@ -59,23 +59,20 @@ typedef enum kcf_call_type {
#define CHECK_RESTRICT(crq) (crq != NULL && \
((crq)->cr_flag & CRYPTO_RESTRICTED))
#define CHECK_RESTRICT_FALSE B_FALSE
#define CHECK_FASTPATH(crq, pd) ((crq) == NULL || \
!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) && \
(pd)->pd_prov_type == CRYPTO_SW_PROVIDER
!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE))
#define KCF_KMFLAG(crq) (((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
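For illustration, a minimal sketch (a hypothetical helper, not part of this commit) of how KCF_KMFLAG() is meant to be used on an allocation in the request path:

	static int
	copy_payload(crypto_call_req_t *crq, const uint8_t *src, size_t len,
	    uint8_t **outp)
	{
		/* Synchronous callers (crq == NULL) may sleep; queued
		 * asynchronous callers must use KM_NOSLEEP. */
		uint8_t *buf = kmem_alloc(len, KCF_KMFLAG(crq));

		if (buf == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(src, buf, len);
		*outp = buf;
		return (CRYPTO_SUCCESS);
	}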
/*
* The framework keeps an internal handle to use in the adaptive
* asynchronous case. This is the case when a client has the
* CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
* CRYPTO_ALWAYS_QUEUE bit clear and a provider is used for
* the request. The request is completed in the context of the calling
* thread and kernel memory must be allocated with KM_NOSLEEP.
*
* The framework passes a pointer to the handle in the crypto_req_handle_t
* argument when it calls the SPI of the software provider. The macros
* argument when it calls the SPI of the provider. The macros
* KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
*
* When a provider asks the framework for kmflag value via
@ -146,7 +143,7 @@ typedef struct kcf_sreq_node {
/*
* Node structure for asynchronous requests. A node can be
* on a chain of requests hanging off the internal context
* structure and can be in the global software provider queue.
* structure and can be in the global provider queue.
*/
typedef struct kcf_areq_node {
/* Should always be the first field in this structure */
@ -176,11 +173,7 @@ typedef struct kcf_areq_node {
kcondvar_t an_turn_cv;
boolean_t an_is_my_turn;
/*
* Next and previous nodes in the global software
* queue. These fields are NULL for a hardware
* provider since we use a taskq there.
*/
/* Next and previous nodes in the global queue. */
struct kcf_areq_node *an_next;
struct kcf_areq_node *an_prev;
@ -244,8 +237,8 @@ typedef struct kcf_reqid_table {
} kcf_reqid_table_t;
/*
* Global software provider queue structure. Requests to be
* handled by a SW provider and have the ALWAYS_QUEUE flag set
* Global provider queue structure. Requests to be
* handled by a provider that have the ALWAYS_QUEUE flag set
* get queued here.
*/
typedef struct kcf_global_swq {
@ -339,11 +332,11 @@ typedef struct kcf_ctx_template {
uint_t ct_generation; /* generation # */
size_t ct_size; /* for freeing */
crypto_spi_ctx_template_t ct_prov_tmpl; /* context template */
/* from the SW prov */
/* from the provider */
} kcf_ctx_template_t;
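A minimal sketch (a hypothetical function; the real checks live in the scheduling code) of how ct_generation guards a cached template against a replaced provider, using me_gen_swprov from kcf_mech_entry_t above:

	static boolean_t
	ctx_tmpl_is_current(const kcf_ctx_template_t *ct,
	    const kcf_mech_entry_t *me)
	{
		/* A stale generation means the provider that produced
		 * ct_prov_tmpl is gone; the template must be rebuilt. */
		return (ct->ct_generation == me->me_gen_swprov);
	}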
/*
* Structure for pool of threads working on global software queue.
* Structure for pool of threads working on the global queue.
*/
typedef struct kcf_pool {
uint32_t kp_threads; /* Number of threads in pool */
@ -431,19 +424,12 @@ typedef struct kcf_ntfy_elem {
* The following values are based on the assumption that it would
* take around eight CPUs to load a hardware provider (this is true for
* at least one product) and a kernel client may come from different
* low-priority interrupt levels. We will have CRYPTO_TASKQ_MIN number
* of cached taskq entries. The CRYPTO_TASKQ_MAX number is based on
* low-priority interrupt levels. The CRYPTO_TASKQ_MAX number is based on
* a throughput of 1GB/s using 512-byte buffers. These are just
* reasonable estimates and might need to change in the future.
*/
#define CRYPTO_TASKQ_THREADS 8
#define CRYPTO_TASKQ_MIN 64
#define CRYPTO_TASKQ_MAX 2 * 1024 * 1024
extern const int crypto_taskq_threads;
extern const int crypto_taskq_minalloc;
extern const int crypto_taskq_maxalloc;
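As a sanity check of the estimate above, a standalone userspace snippet (not part of the module) confirming the arithmetic behind CRYPTO_TASKQ_MAX:

	#include <assert.h>

	int
	main(void)
	{
		/* 1 GB/s of 512-byte buffers is (1 << 30) / 512, i.e.
		 * exactly 2 * 1024 * 1024 requests per second. */
		assert((1ULL << 30) / 512 == 2 * 1024 * 1024);
		return (0);
	}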
/*
* All pending crypto bufcalls are put on a list. cbuf_list_lock
* protects changes to this list.
@ -458,19 +444,12 @@ extern kcondvar_t cbuf_list_cv;
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;
boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_mech_type_t,
boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **,
crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t,
boolean_t, size_t);
boolean_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,

View File

@ -43,10 +43,6 @@ extern "C" {
#define __no_const
#endif /* CONSTIFY_PLUGIN */
#define CRYPTO_SPI_VERSION_1 1
#define CRYPTO_SPI_VERSION_2 2
#define CRYPTO_SPI_VERSION_3 3
/*
* Provider-private handle. This handle is specified by a provider
* when it registers by means of the pi_provider_handle field of
@ -56,18 +52,16 @@ extern "C" {
typedef void *crypto_provider_handle_t;
/*
* Context templates can be used to by software providers to pre-process
* Context templates can be used by providers to pre-process
* keying material, such as key schedules. They are allocated by
* a software provider create_ctx_template(9E) entry point, and passed
* a provider's create_ctx_template(9E) entry point, and passed
* as argument to initialization and atomic provider entry points.
*/
typedef void *crypto_spi_ctx_template_t;
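To make the pre-processing idea concrete, a hedged sketch of a provider-side template; the struct, function names, and simplified signature are hypothetical (the real entry point is declared through the provider's ops vector):

	/* Hypothetical provider-private template: round keys expanded once. */
	typedef struct my_tmpl {
		uint32_t	mt_roundkeys[60];
		uint_t		mt_nrounds;
	} my_tmpl_t;

	static void
	my_expand_key(const crypto_key_t *key, my_tmpl_t *mt)
	{
		/* Key-schedule derivation elided in this sketch. */
		(void) key;
		mt->mt_nrounds = 14;
	}

	static int
	my_create_ctx_template(crypto_key_t *key,
	    crypto_spi_ctx_template_t *tmpl, size_t *size)
	{
		my_tmpl_t *mt = kmem_zalloc(sizeof (*mt), KM_SLEEP);

		my_expand_key(key, mt);
		*tmpl = mt;		/* reused by later init/atomic calls */
		*size = sizeof (*mt);
		return (CRYPTO_SUCCESS);
	}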
/*
* Request handles are used by the kernel to identify an asynchronous
* request being processed by a provider. It is passed by the kernel
* to a hardware provider when submitting a request, and must be
* specified by a provider when calling crypto_op_notification(9F)
* request being processed by a provider.
*/
typedef void *crypto_req_handle_t;
@ -268,18 +262,13 @@ typedef uint_t crypto_kcf_provider_handle_t;
*/
typedef struct crypto_provider_info {
const char *pi_provider_description;
crypto_provider_type_t pi_provider_type;
crypto_provider_handle_t pi_provider_handle;
const crypto_ops_t *pi_ops_vector;
uint_t pi_mech_list_count;
const crypto_mech_info_t *pi_mechanisms;
uint_t pi_logical_provider_count;
crypto_kcf_provider_handle_t *pi_logical_providers;
uint_t pi_flags;
} crypto_provider_info_t;
/* hidden providers can only be accessed via a logical provider */
#define CRYPTO_HIDE_PROVIDER 0x00000001
/*
* provider cannot do multi-part digest (updates) and has a limit
* on the maximum input data that it can digest.

View File

@ -150,7 +150,6 @@ static const crypto_ops_t aes_crypto_ops = {
static const crypto_provider_info_t aes_prov_info = {
"AES Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&aes_crypto_ops,
sizeof (aes_mech_info_tab) / sizeof (crypto_mech_info_t),

View File

@ -166,7 +166,6 @@ static const crypto_ops_t sha2_crypto_ops = {
static const crypto_provider_info_t sha2_prov_info = {
"SHA2 Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&sha2_crypto_ops,
sizeof (sha2_mech_info_tab) / sizeof (crypto_mech_info_t),

View File

@ -104,7 +104,6 @@ static const crypto_ops_t skein_crypto_ops = {
static const crypto_provider_info_t skein_prov_info = {
"Skein Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&skein_crypto_ops,
sizeof (skein_mech_info_tab) / sizeof (crypto_mech_info_t),
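Taken together, the three hunks above leave initializers of this shape; a hedged reconstruction (the fields after the mechanism count follow the crypto_provider_info_t layout shown earlier, and the trailing values are assumptions):

	static const crypto_provider_info_t skein_prov_info = {
		"Skein Software Provider",	/* pi_provider_description */
		NULL,				/* pi_provider_handle */
		&skein_crypto_ops,		/* pi_ops_vector */
		sizeof (skein_mech_info_tab) / sizeof (crypto_mech_info_t),
		skein_mech_info_tab,		/* pi_mechanisms (assumed) */
		0				/* pi_flags (assumed) */
	};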

View File

@ -36,16 +36,6 @@
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
/*
* minalloc and maxalloc values to be used for taskq_create().
*/
const int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
const int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
const int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(const crypto_provider_info_t *,
kcf_provider_desc_t *);
static int init_prov_mechs(const crypto_provider_info_t *,
kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
@ -63,35 +53,22 @@ static const kcf_prov_stats_t kcf_stats_ks_data_template = {
* Providers pass a crypto_provider_info structure to crypto_register_provider()
* and get back a handle. The crypto_provider_info structure contains a
* list of mechanisms supported by the provider and an ops vector containing
* provider entry points. Hardware providers call this routine in their attach
* routines. Software providers call this routine in their _init() routine.
* provider entry points. Providers call this routine in their _init() routine.
*/
int
crypto_register_provider(const crypto_provider_info_t *info,
crypto_kcf_provider_handle_t *handle)
{
char *ks_name;
kcf_provider_desc_t *prov_desc = NULL;
int ret = CRYPTO_ARGUMENTS_BAD;
/*
* Check provider type, must be software, hardware, or logical.
*/
if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
info->pi_provider_type != CRYPTO_SW_PROVIDER &&
info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
return (CRYPTO_ARGUMENTS_BAD);
/*
* Allocate and initialize a new provider descriptor. We also
* hold it and release it when done.
*/
prov_desc = kcf_alloc_provider_desc(info);
prov_desc = kcf_alloc_provider_desc();
KCF_PROV_REFHOLD(prov_desc);
prov_desc->pd_prov_type = info->pi_provider_type;
/* provider-private handle, opaque to KCF */
prov_desc->pd_prov_handle = info->pi_provider_handle;
@ -99,10 +76,8 @@ crypto_register_provider(const crypto_provider_info_t *info,
prov_desc->pd_description = info->pi_provider_description;
/* Change from Illumos: the ops vector is persistent. */
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
prov_desc->pd_ops_vector = info->pi_ops_vector;
prov_desc->pd_flags = info->pi_flags;
}
prov_desc->pd_ops_vector = info->pi_ops_vector;
prov_desc->pd_flags = info->pi_flags;
/* process the mechanisms supported by the provider */
if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
@ -118,56 +93,33 @@ crypto_register_provider(const crypto_provider_info_t *info,
}
/*
* We create a taskq only for a hardware provider. The global
* software queue is used for software providers. We handle ordering
* The global queue is used for all providers. We handle ordering
* of multi-part requests in the taskq routine. So, it is safe to
* have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
* to keep some entries cached to improve performance.
*/
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
CRYPTO_TASKQ_THREADS, minclsyspri,
CRYPTO_TASKQ_MIN, CRYPTO_TASKQ_MAX,
TASKQ_PREPOPULATE);
else
prov_desc->pd_sched_info.ks_taskq = NULL;
if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
/*
* Create the kstat for this provider. There is a kstat
* installed for each successfully registered provider.
* This kstat is deleted, when the provider unregisters.
*/
if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
ks_name = kmem_asprintf("%s_%s",
"NONAME", "provider_stats");
} else {
ks_name = kmem_asprintf("%s_%d_%u_%s",
"NONAME", 0, prov_desc->pd_prov_id,
"provider_stats");
}
/*
* Create the kstat for this provider. There is a kstat
* installed for each successfully registered provider.
* This kstat is deleted when the provider unregisters.
*/
prov_desc->pd_kstat = kstat_create("kcf", 0, "NONAME_provider_stats",
"crypto", KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (prov_desc->pd_kstat != NULL) {
bcopy(&kcf_stats_ks_data_template,
&prov_desc->pd_ks_data,
sizeof (kcf_stats_ks_data_template));
prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
prov_desc->pd_kstat->ks_private = prov_desc;
prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
kstat_install(prov_desc->pd_kstat);
}
kmem_strfree(ks_name);
if (prov_desc->pd_kstat != NULL) {
bcopy(&kcf_stats_ks_data_template,
&prov_desc->pd_ks_data,
sizeof (kcf_stats_ks_data_template));
prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
prov_desc->pd_kstat->ks_private = prov_desc;
prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
kstat_install(prov_desc->pd_kstat);
}
if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
process_logical_providers(info, prov_desc);
mutex_enter(&prov_desc->pd_lock);
prov_desc->pd_state = KCF_PROV_READY;
mutex_exit(&prov_desc->pd_lock);
@ -183,8 +135,7 @@ bail:
/*
* This routine is used to notify the framework when a provider is being
* removed. Hardware providers call this routine in their detach routines.
* Software providers call this routine in their _fini() routine.
* removed. Providers call this routine in their _fini() routine.
*/
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
@ -212,46 +163,30 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
saved_state = desc->pd_state;
desc->pd_state = KCF_PROV_REMOVED;
if (saved_state == KCF_PROV_BUSY) {
/*
* Check if this provider is currently being used.
* pd_irefcnt is the number of holds from the internal
* structures. We add one to account for the above lookup.
*/
if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
desc->pd_state = saved_state;
mutex_exit(&desc->pd_lock);
/* Release reference held by kcf_prov_tab_lookup(). */
KCF_PROV_REFRELE(desc);
/*
* The per-provider taskq threads may be waiting. We
* signal them so that they can start failing requests.
* The administrator will presumably stop the clients,
* thus removing the holds, when they get the busy
* return value. Any retry will succeed then.
*/
cv_broadcast(&desc->pd_resume_cv);
}
if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
/*
* Check if this provider is currently being used.
* pd_irefcnt is the number of holds from the internal
* structures. We add one to account for the above lookup.
*/
if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
desc->pd_state = saved_state;
mutex_exit(&desc->pd_lock);
/* Release reference held by kcf_prov_tab_lookup(). */
KCF_PROV_REFRELE(desc);
/*
* The administrator will presumably stop the clients,
* thus removing the holds, when they get the busy
* return value. Any retry will succeed then.
*/
return (CRYPTO_BUSY);
}
return (CRYPTO_BUSY);
}
mutex_exit(&desc->pd_lock);
if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
remove_provider(desc);
}
if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
/* remove the provider from the mechanisms tables */
for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
mech_idx++) {
kcf_remove_mech_provider(
desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
}
/* remove the provider from the mechanisms tables */
for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
mech_idx++) {
kcf_remove_mech_provider(
desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
}
/* remove provider from providers table */
@ -264,51 +199,34 @@ crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
delete_kstat(desc);
if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
/* Release reference held by kcf_prov_tab_lookup(). */
KCF_PROV_REFRELE(desc);
/* Release reference held by kcf_prov_tab_lookup(). */
KCF_PROV_REFRELE(desc);
/*
* Wait till the existing requests complete.
*/
mutex_enter(&desc->pd_lock);
while (desc->pd_state != KCF_PROV_FREED)
cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
mutex_exit(&desc->pd_lock);
} else {
/*
* Wait until requests that have been sent to the provider
* complete.
*/
mutex_enter(&desc->pd_lock);
while (desc->pd_irefcnt > 0)
cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
mutex_exit(&desc->pd_lock);
}
/*
* Wait until the existing requests complete.
*/
mutex_enter(&desc->pd_lock);
while (desc->pd_state != KCF_PROV_FREED)
cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
mutex_exit(&desc->pd_lock);
kcf_do_notify(desc, B_FALSE);
if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
/*
* This is the only place where kcf_free_provider_desc()
* is called directly. KCF_PROV_REFRELE() should free the
* structure in all other places.
*/
ASSERT(desc->pd_state == KCF_PROV_FREED &&
desc->pd_refcnt == 0);
kcf_free_provider_desc(desc);
} else {
KCF_PROV_REFRELE(desc);
}
/*
* This is the only place where kcf_free_provider_desc()
* is called directly. KCF_PROV_REFRELE() should free the
* structure in all other places.
*/
ASSERT(desc->pd_state == KCF_PROV_FREED &&
desc->pd_refcnt == 0);
kcf_free_provider_desc(desc);
return (CRYPTO_SUCCESS);
}
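A minimal sketch (hypothetical module hooks, not part of this commit) of the _fini() contract described above, including the CRYPTO_BUSY retry case:

	static crypto_kcf_provider_handle_t my_prov_handle;

	static int
	my_fini(void)
	{
		int ret = crypto_unregister_provider(my_prov_handle);

		if (ret == CRYPTO_BUSY)
			return (EBUSY);	/* holds remain; unload retried later */
		return (ret == CRYPTO_SUCCESS ? 0 : EINVAL);
	}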
/*
* This routine is used by software providers to determine
* This routine is used by providers to determine
* whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
* Note that hardware providers can always use KM_SLEEP. So,
* they do not need to call this routine.
*
* This routine can be called from user or interrupt context.
*/
@ -323,9 +241,6 @@ crypto_kmflag(crypto_req_handle_t handle)
* during registration. A NULL crypto_provider_info_t indicates
* an already initialized provider descriptor.
*
* Mechanisms are not added to the kernel's mechanism table if the
* provider is a logical provider.
*
* Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
* of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
* if the table of mechanisms is full.
@ -339,15 +254,6 @@ init_prov_mechs(const crypto_provider_info_t *info, kcf_provider_desc_t *desc)
kcf_prov_mech_desc_t *pmd;
int desc_use_count = 0;
if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
if (info != NULL) {
ASSERT(info->pi_mechanisms != NULL);
desc->pd_mech_list_count = info->pi_mech_list_count;
desc->pd_mechanisms = info->pi_mechanisms;
}
return (CRYPTO_SUCCESS);
}
/*
* Copy the mechanism list from the provider info to the provider
* descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
@ -403,12 +309,12 @@ init_prov_mechs(const crypto_provider_info_t *info, kcf_provider_desc_t *desc)
}
/*
* Don't allow multiple software providers with disabled mechanisms
* Don't allow multiple providers with disabled mechanisms
* to register. Subsequent enabling of mechanisms will result in
* an unsupported configuration, i.e. multiple software providers
* an unsupported configuration, i.e. multiple providers
* per mechanism.
*/
if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
if (desc_use_count == 0)
return (CRYPTO_ARGUMENTS_BAD);
if (err == KCF_SUCCESS)
@ -479,117 +385,6 @@ undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}
/*
* Add provider (p1) to another provider's array of providers (p2).
* Hardware and logical providers use this array to cross-reference
* each other.
*/
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
kcf_provider_list_t *new;
new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
mutex_enter(&p2->pd_lock);
new->pl_next = p2->pd_provider_list;
p2->pd_provider_list = new;
KCF_PROV_IREFHOLD(p1);
new->pl_provider = p1;
mutex_exit(&p2->pd_lock);
}
/*
* Remove provider (p1) from another provider's array of providers (p2).
* Hardware and logical providers use this array to cross-reference
* each other.
*/
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
kcf_provider_list_t *pl = NULL, **prev;
mutex_enter(&p2->pd_lock);
for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
if (pl->pl_provider == p1) {
break;
}
}
if (p1 == NULL) {
mutex_exit(&p2->pd_lock);
return;
}
/* detach and free kcf_provider_list structure */
KCF_PROV_IREFRELE(p1);
*prev = pl->pl_next;
kmem_free(pl, sizeof (*pl));
mutex_exit(&p2->pd_lock);
}
/*
* Convert an array of logical provider handles (crypto_provider_id)
* stored in a crypto_provider_info structure into an array of provider
* descriptors (kcf_provider_desc_t) attached to a logical provider.
*/
static void
process_logical_providers(const crypto_provider_info_t *info,
kcf_provider_desc_t *hp)
{
kcf_provider_desc_t *lp;
crypto_provider_id_t handle;
int count = info->pi_logical_provider_count;
int i;
/* add hardware provider to each logical provider */
for (i = 0; i < count; i++) {
handle = info->pi_logical_providers[i];
lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
if (lp == NULL) {
continue;
}
add_provider_to_array(hp, lp);
hp->pd_flags |= KCF_LPROV_MEMBER;
/*
* A hardware provider has to have the provider descriptor of
* every logical provider it belongs to, so it can be removed
* from the logical provider if the hardware provider
* unregisters from the framework.
*/
add_provider_to_array(lp, hp);
KCF_PROV_REFRELE(lp);
}
}
/*
* This routine removes a provider from all of the logical or
* hardware providers it belongs to, and frees the provider's
* array of pointers to providers.
*/
static void
remove_provider(kcf_provider_desc_t *pp)
{
kcf_provider_desc_t *p;
kcf_provider_list_t *e, *next;
mutex_enter(&pp->pd_lock);
for (e = pp->pd_provider_list; e != NULL; e = next) {
p = e->pl_provider;
remove_provider_from_array(pp, p);
if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
p->pd_provider_list == NULL)
p->pd_flags &= ~KCF_LPROV_MEMBER;
KCF_PROV_IREFRELE(p);
next = e->pl_next;
kmem_free(e, sizeof (*e));
}
pp->pd_provider_list = NULL;
mutex_exit(&pp->pd_lock);
}
/*
* Dispatch events as needed for a provider. The is_added flag tells
* whether the provider is registering or unregistering.
@ -600,36 +395,19 @@ kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
int i;
crypto_notify_event_change_t ec;
ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
ASSERT(prov_desc->pd_state > KCF_PROV_ALLOCATED);
/*
* Inform interested clients of the mechanisms becoming
* available/unavailable. We skip this for logical providers
* as they do not affect mechanisms.
* available/unavailable.
*/
if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
ec.ec_provider_type = prov_desc->pd_prov_type;
ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
CRYPTO_MECH_REMOVED;
for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
(void) strlcpy(ec.ec_mech_name,
prov_desc->pd_mechanisms[i].cm_mech_name,
CRYPTO_MAX_MECH_NAME);
kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
}
}
/*
* Inform interested clients about the new or departing provider.
* In case of a logical provider, we need to notify the event only
* for the logical provider and not for the underlying
* providers which are known by the KCF_LPROV_MEMBER bit.
*/
if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
(prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
CRYPTO_MECH_REMOVED;
for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
(void) strlcpy(ec.ec_mech_name,
prov_desc->pd_mechanisms[i].cm_mech_name,
CRYPTO_MAX_MECH_NAME);
kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
}
}
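On the consumer side, a hedged sketch of a callback that receives the crypto_notify_event_change_t built by kcf_do_notify(); the function name is hypothetical and registration via crypto_notify_events(9F) is assumed:

	static void
	my_event_cb(uint32_t event, void *arg)
	{
		crypto_notify_event_change_t *ec = arg;

		if (event != CRYPTO_EVENT_MECHS_CHANGED)
			return;
		/* ec_mech_name is a fixed-size crypto_mech_name_t buffer. */
		cmn_err(CE_NOTE, "mechanism %s %s", ec->ec_mech_name,
		    ec->ec_change == CRYPTO_MECH_ADDED ?
		    "added" : "removed");
	}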

View File

@ -1229,13 +1229,6 @@ taskq_destroy(taskq_t *tq)
}
EXPORT_SYMBOL(taskq_destroy);
boolean_t
taskq_empty(taskq_t *tq)
{
return (tq->tq_lowest_id == tq->tq_next_id);
}
EXPORT_SYMBOL(taskq_empty);
static unsigned int spl_taskq_kick = 0;
/*