tls: extract context alloc/initialization out of tls_set_sw_offload

[ Upstream commit 615580cbc99af0da2d1c7226fab43a3d5003eb97 ]

Simplify tls_set_sw_offload a bit by extracting the allocation and
initialization of the TX and RX software contexts into dedicated
helpers, init_ctx_tx() and init_ctx_rx().
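Both helpers share the same allocate-or-reuse shape: if a software
context was already installed on the tls_context by an earlier setup
path, reuse it; otherwise kzalloc a fresh zeroed one; then run the
initializers in either case. A minimal userspace sketch of that shape,
with calloc() standing in for kzalloc() and all names (sw_ctx,
init_ctx, priv_ctx) illustrative rather than taken from the kernel
source:

/* ctx_demo.c - allocate-or-reuse sketch; illustrative names only. */
#include <stdio.h>
#include <stdlib.h>

struct sw_ctx {
	int initialized;	/* stands in for the waitqueues, locks, lists */
};

/* Same shape as init_ctx_tx()/init_ctx_rx(): reuse "existing" if an
 * earlier setup path already allocated it, otherwise calloc() a zeroed
 * object (the kzalloc() analogue); initialize in both cases. */
static struct sw_ctx *init_ctx(struct sw_ctx *existing)
{
	struct sw_ctx *ctx;

	if (!existing) {
		ctx = calloc(1, sizeof(*ctx));
		if (!ctx)
			return NULL;	/* caller maps this to -ENOMEM */
	} else {
		ctx = existing;
	}

	ctx->initialized = 1;	/* the (re)initialization step */
	return ctx;
}

int main(void)
{
	struct sw_ctx *priv_ctx = NULL;

	priv_ctx = init_ctx(priv_ctx);	/* first call: allocates */
	if (!priv_ctx)
		return 1;
	printf("allocated %p\n", (void *)priv_ctx);

	priv_ctx = init_ctx(priv_ctx);	/* second call: reuses */
	printf("reused    %p\n", (void *)priv_ctx);

	free(priv_ctx);
	return 0;
}

As in the patch, the caller stores the returned pointer back into the
slot it read from and maps a NULL return to -ENOMEM.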

Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2597,6 +2597,48 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
 			     tls_ctx->prot_info.version != TLS_1_3_VERSION;
 }
 
+static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
+{
+	struct tls_sw_context_tx *sw_ctx_tx;
+
+	if (!ctx->priv_ctx_tx) {
+		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+		if (!sw_ctx_tx)
+			return NULL;
+	} else {
+		sw_ctx_tx = ctx->priv_ctx_tx;
+	}
+
+	crypto_init_wait(&sw_ctx_tx->async_wait);
+	spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+	sw_ctx_tx->tx_work.sk = sk;
+
+	return sw_ctx_tx;
+}
+
+static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
+{
+	struct tls_sw_context_rx *sw_ctx_rx;
+
+	if (!ctx->priv_ctx_rx) {
+		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+		if (!sw_ctx_rx)
+			return NULL;
+	} else {
+		sw_ctx_rx = ctx->priv_ctx_rx;
+	}
+
+	crypto_init_wait(&sw_ctx_rx->async_wait);
+	spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+	init_waitqueue_head(&sw_ctx_rx->wq);
+	skb_queue_head_init(&sw_ctx_rx->rx_list);
+	skb_queue_head_init(&sw_ctx_rx->async_hold);
+
+	return sw_ctx_rx;
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2618,48 +2660,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 	}
 
 	if (tx) {
-		if (!ctx->priv_ctx_tx) {
-			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
-			if (!sw_ctx_tx) {
-				rc = -ENOMEM;
-				goto out;
-			}
-			ctx->priv_ctx_tx = sw_ctx_tx;
-		} else {
-			sw_ctx_tx =
-				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
-		}
-	} else {
-		if (!ctx->priv_ctx_rx) {
-			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
-			if (!sw_ctx_rx) {
-				rc = -ENOMEM;
-				goto out;
-			}
-			ctx->priv_ctx_rx = sw_ctx_rx;
-		} else {
-			sw_ctx_rx =
-				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
-		}
-	}
-
-	if (tx) {
-		crypto_init_wait(&sw_ctx_tx->async_wait);
-		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
+		if (!ctx->priv_ctx_tx)
+			return -ENOMEM;
+
+		sw_ctx_tx = ctx->priv_ctx_tx;
 		crypto_info = &ctx->crypto_send.info;
 		cctx = &ctx->tx;
 		aead = &sw_ctx_tx->aead_send;
-		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
-		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
-		sw_ctx_tx->tx_work.sk = sk;
 	} else {
-		crypto_init_wait(&sw_ctx_rx->async_wait);
-		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
-		init_waitqueue_head(&sw_ctx_rx->wq);
+		ctx->priv_ctx_rx = init_ctx_rx(ctx);
+		if (!ctx->priv_ctx_rx)
+			return -ENOMEM;
+
+		sw_ctx_rx = ctx->priv_ctx_rx;
 		crypto_info = &ctx->crypto_recv.info;
 		cctx = &ctx->rx;
-		skb_queue_head_init(&sw_ctx_rx->rx_list);
-		skb_queue_head_init(&sw_ctx_rx->async_hold);
 		aead = &sw_ctx_rx->aead_recv;
 	}