Merge branch 'tls-leaks'

Jakub Kicinski says:

====================
net: tls: fix memory leaks and freeing skbs

This series fixes two memory issues and a stack overflow.
The first two patches fix fairly simple leaks.  The third
patch partially reverts an optimization made to the strparser
which causes creation of skb->frag_list->skb->frag_list...
chains of hundreds of skbs, leading to recursive kfree_skb()
calls filling up the kernel stack.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-04-10 13:07:02 -07:00
commit 44f5e04807
5 changed files with 41 additions and 22 deletions
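The stack overflow described in the cover letter comes from freeing skb->frag_list chains one recursive kfree_skb() call at a time, one stack frame per chained skb, on a kernel stack of only a few kilobytes. The sketch below is a minimal userspace model of that shape, not kernel code: toy_skb, build_chain(), toy_free_recursive() and toy_free_iterative() are made-up stand-ins for struct sk_buff and its freeing paths, assuming only that each buffer links to the next through a frag_list pointer.

/* Userspace toy model (not kernel code) of freeing a frag_list-style
 * chain.  One stack frame per node when done recursively; constant
 * stack when done iteratively.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_skb {
	struct toy_skb *frag_list;	/* next buffer in the chain */
	char payload[256];		/* stand-in for per-skb data */
};

/* Build a chain of n nodes linked through frag_list. */
static struct toy_skb *build_chain(int n)
{
	struct toy_skb *head = NULL;

	while (n-- > 0) {
		struct toy_skb *skb = calloc(1, sizeof(*skb));

		if (!skb)
			abort();
		skb->frag_list = head;
		head = skb;
	}
	return head;
}

/* Recursive free: stack depth grows with chain length.  A few hundred
 * frames are harmless in userspace, but a kernel stack of a few
 * kilobytes can be exhausted by chains of 100s of skbs.
 */
static void toy_free_recursive(struct toy_skb *skb)
{
	if (!skb)
		return;
	toy_free_recursive(skb->frag_list);
	free(skb);
}

/* Iterative free: bounded stack usage for any chain length. */
static void toy_free_iterative(struct toy_skb *skb)
{
	while (skb) {
		struct toy_skb *next = skb->frag_list;

		free(skb);
		skb = next;
	}
}

int main(void)
{
	toy_free_recursive(build_chain(500));
	toy_free_iterative(build_chain(500));
	printf("both chains freed\n");
	return 0;
}

The strparser change in this series removes the code path that let such long chains build up in the first place; the toy model only illustrates why deep chains and recursive freeing do not mix.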

--- a/include/net/tls.h
+++ b/include/net/tls.h

@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
 			int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 		int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 			    int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {

--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c

@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
 			/* We are going to append to the frags_list of head.
 			 * Need to unshare the frag_list.
 			 */
-			if (skb_has_frag_list(head)) {
-				err = skb_unclone(head, GFP_ATOMIC);
-				if (err) {
-					STRP_STATS_INCR(strp->stats.mem_fail);
-					desc->error = err;
-					return 0;
-				}
+			err = skb_unclone(head, GFP_ATOMIC);
+			if (err) {
+				STRP_STATS_INCR(strp->stats.mem_fail);
+				desc->error = err;
+				return 0;
 			}
 
 			if (unlikely(skb_shinfo(head)->frag_list)) {

--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c

@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-	if (ctx->tx_conf == TLS_HW)
+	if (ctx->tx_conf == TLS_HW) {
 		kfree(tls_offload_ctx_tx(ctx));
+		kfree(ctx->tx.rec_seq);
+		kfree(ctx->tx.iv);
+	}
 
 	if (ctx->rx_conf == TLS_HW)
 		kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
 			    struct page_frag *pfrag,
 			    int size)

--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c

@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 	return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+	struct scatterlist *sg;
+
+	sg = ctx->partially_sent_record;
+	if (!sg)
+		return false;
+
+	while (1) {
+		put_page(sg_page(sg));
+		sk_mem_uncharge(sk, sg->length);
+
+		if (sg_is_last(sg))
+			break;
+		sg++;
+	}
+	ctx->partially_sent_record = NULL;
+	return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 		kfree(ctx->tx.rec_seq);
 		kfree(ctx->tx.iv);
 		tls_sw_free_resources_tx(sk);
+	} else if (ctx->tx_conf == TLS_HW) {
+		tls_device_free_resources_tx(sk);
 	}
 
 	if (ctx->rx_conf == TLS_SW) {

--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c

@@ -2052,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
 	/* Free up un-sent records in tx_list. First, free
 	 * the partially sent record if any at head of tx_list.
 	 */
-	if (tls_ctx->partially_sent_record) {
-		struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-		while (1) {
-			put_page(sg_page(sg));
-			sk_mem_uncharge(sk, sg->length);
-
-			if (sg_is_last(sg))
-				break;
-			sg++;
-		}
-
-		tls_ctx->partially_sent_record = NULL;
-
+	if (tls_free_partial_record(sk, tls_ctx)) {
 		rec = list_first_entry(&ctx->tx_list,
 				       struct tls_rec, list);
 		list_del(&rec->list);