net: generalize sk_alloc_sg to work with scatterlist rings
The current implementation of sk_alloc_sg expects the scatterlist to always start at entry 0 and complete at entry MAX_SKB_FRAGS. Future patches will want to support starting at an arbitrary offset into the scatterlist, so add an additional sg_start parameter and default it to the current values in the TLS code paths. Signed-off-by: John Fastabend <john.fastabend@gmail.com> Acked-by: David S. Miller <davem@davemloft.net> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent
312fc2b4c8
commit
8c05dbf04b
|
@@ -2142,7 +2142,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
|
|||
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
|
||||
|
||||
int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
|
||||
int *sg_num_elem, unsigned int *sg_size,
|
||||
int sg_start, int *sg_curr, unsigned int *sg_size,
|
||||
int first_coalesce);
|
||||
|
||||
/*
|
||||
|
|
|
@@ -2240,19 +2240,20 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
|
|||
EXPORT_SYMBOL(sk_page_frag_refill);
|
||||
|
||||
int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
|
||||
int *sg_num_elem, unsigned int *sg_size,
|
||||
int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
|
||||
int first_coalesce)
|
||||
{
|
||||
int sg_curr = *sg_curr_index, use = 0, rc = 0;
|
||||
unsigned int size = *sg_curr_size;
|
||||
struct page_frag *pfrag;
|
||||
unsigned int size = *sg_size;
|
||||
int num_elem = *sg_num_elem, use = 0, rc = 0;
|
||||
struct scatterlist *sge;
|
||||
unsigned int orig_offset;
|
||||
|
||||
len -= size;
|
||||
pfrag = sk_page_frag(sk);
|
||||
|
||||
while (len > 0) {
|
||||
unsigned int orig_offset;
|
||||
|
||||
if (!sk_page_frag_refill(sk, pfrag)) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
|
@@ -2270,17 +2271,21 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
|
|||
orig_offset = pfrag->offset;
|
||||
pfrag->offset += use;
|
||||
|
||||
sge = sg + num_elem - 1;
|
||||
if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
|
||||
sge = sg + sg_curr - 1;
|
||||
if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
|
||||
sg->offset + sg->length == orig_offset) {
|
||||
sg->length += use;
|
||||
} else {
|
||||
sge++;
|
||||
sge = sg + sg_curr;
|
||||
sg_unmark_end(sge);
|
||||
sg_set_page(sge, pfrag->page, use, orig_offset);
|
||||
get_page(pfrag->page);
|
||||
++num_elem;
|
||||
if (num_elem == MAX_SKB_FRAGS) {
|
||||
sg_curr++;
|
||||
|
||||
if (sg_curr == MAX_SKB_FRAGS)
|
||||
sg_curr = 0;
|
||||
|
||||
if (sg_curr == sg_start) {
|
||||
rc = -ENOSPC;
|
||||
break;
|
||||
}
|
||||
|
@@ -2289,8 +2294,8 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
|
|||
len -= use;
|
||||
}
|
||||
out:
|
||||
*sg_size = size;
|
||||
*sg_num_elem = num_elem;
|
||||
*sg_curr_size = size;
|
||||
*sg_curr_index = sg_curr;
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(sk_alloc_sg);
|
||||
|
|
|
@@ -94,7 +94,7 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
|
|||
int rc = 0;
|
||||
|
||||
rc = sk_alloc_sg(sk, len,
|
||||
ctx->sg_encrypted_data,
|
||||
ctx->sg_encrypted_data, 0,
|
||||
&ctx->sg_encrypted_num_elem,
|
||||
&ctx->sg_encrypted_size, 0);
|
||||
|
||||
|
@@ -107,7 +107,7 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
|
|||
struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
|
||||
int rc = 0;
|
||||
|
||||
rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data,
|
||||
rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
|
||||
&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
|
||||
tls_ctx->pending_open_record_frags);
|
||||
|
||||
|
|
Loading…
Reference in New Issue