Merge branch 'bpf-sockmap-sg-api-fixes'

Prashant Bhole says:

====================
These patches fix scatterlist (sg) API usage in sockmap. Previously,
sockmap didn't use sg_init_table(), which caused a BUG_ON() in the sg
API to be hit when CONFIG_DEBUG_SG is enabled (a standalone sketch of
that check follows the commit metadata below).

v1: added sg_init_table() calls wherever needed.

v2:
- Patch 1 adds a new helper function, sg_init_marker(), to the sg API
- Patch 2 uses sg_init_marker() and sg_init_table() in the appropriate
  places

Background:
While reviewing v1, John Fastabend raised a valid point about the
unnecessary memset in sg_init_table(): sockmap uses an sg table that
is embedded in a struct, and since the enclosing struct is already
zeroed out, the memset in sg_init_table() is redundant.

So Daniel Borkmann suggested defining another static inline function
in scatterlist.h which only initializes sg_magic, and having
sg_init_table() call it as well. Based on this suggestion I defined
sg_init_marker(), which sets sg_magic and calls sg_mark_end() (a usage
sketch follows the scatterlist.h hunk below).
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Merged by Daniel Borkmann on 2018-03-30 22:50:16 +02:00
commit 807ae7daf5
3 changed files with 27 additions and 13 deletions
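
The BUG_ON mentioned in the cover letter comes from the sanity checks
the scatterlist helpers perform when CONFIG_DEBUG_SG is enabled: every
entry is expected to carry SG_MAGIC, which only an init call stores.
The standalone userspace sketch below is illustrative only --
sg_debug_check() is a hypothetical stand-in for the kernel's
BUG_ON(sg->sg_magic != SG_MAGIC) checks and the struct layout is
simplified -- but it shows why a zeroed-but-never-initialized table
trips the check and how a marker-style init satisfies it.

/*
 * Userspace sketch, not kernel code: mimics the CONFIG_DEBUG_SG
 * sg_magic check that an uninitialized sg entry trips.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define SG_MAGIC 0x87654321UL

struct scatterlist {
        unsigned long sg_magic;   /* only exists with CONFIG_DEBUG_SG in the kernel */
        unsigned long page_link;
        unsigned int  offset;
        unsigned int  length;
};

/* Hypothetical stand-in for the BUG_ON(sg->sg_magic != SG_MAGIC) checks. */
static void sg_debug_check(const struct scatterlist *sg)
{
        assert(sg->sg_magic == SG_MAGIC);
}

/* Marker-style init: store the magic; the real helper also marks the end. */
static void init_marker(struct scatterlist *sgl, unsigned int nents)
{
        unsigned int i;

        for (i = 0; i < nents; i++)
                sgl[i].sg_magic = SG_MAGIC;
}

int main(void)
{
        struct scatterlist sg[4];

        memset(sg, 0, sizeof(sg));   /* zeroed, like sockmap's embedded table */
        /* sg_debug_check(&sg[0]) would abort here: magic is 0, not SG_MAGIC */

        init_marker(sg, 4);          /* what the new helper provides */
        sg_debug_check(&sg[0]);      /* passes now */
        printf("sg table carries the debug marker\n");
        return 0;
}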

include/linux/scatterlist.h

@@ -248,6 +248,24 @@ static inline void *sg_virt(struct scatterlist *sg)
         return page_address(sg_page(sg)) + sg->offset;
 }
 
+/**
+ * sg_init_marker - Initialize markers in sg table
+ * @sgl:    The SG table
+ * @nents:  Number of entries in table
+ *
+ **/
+static inline void sg_init_marker(struct scatterlist *sgl,
+                                  unsigned int nents)
+{
+#ifdef CONFIG_DEBUG_SG
+        unsigned int i;
+
+        for (i = 0; i < nents; i++)
+                sgl[i].sg_magic = SG_MAGIC;
+#endif
+        sg_mark_end(&sgl[nents - 1]);
+}
+
 int sg_nents(struct scatterlist *sg);
 int sg_nents_for_len(struct scatterlist *sg, u64 len);
 struct scatterlist *sg_next(struct scatterlist *);
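
As the cover letter's background section notes, sg_init_marker() is
meant for callers whose scatterlist array already lives in zeroed
memory, where the memset() done by sg_init_table() would be redundant.
The fragment below is only an illustrative sketch of that calling
pattern -- struct my_ctx and my_ctx_setup() are made-up names, not part
of this patch; the real user is the sockmap code changed in the next
file.

#include <linux/scatterlist.h>
#include <linux/skbuff.h>        /* MAX_SKB_FRAGS */
#include <linux/string.h>

/* Illustrative only: a context struct with an embedded sg array. */
struct my_ctx {
        struct scatterlist sg_data[MAX_SKB_FRAGS];
        /* ... other fields ... */
};

static void my_ctx_setup(struct my_ctx *ctx)
{
        /* The enclosing struct is zeroed here (or came from kzalloc()),
         * so every sg entry is already all-zero ...
         */
        memset(ctx, 0, sizeof(*ctx));

        /* ... which makes the memset() inside sg_init_table() redundant.
         * sg_init_marker() only stores sg_magic (under CONFIG_DEBUG_SG)
         * and sets the end marker, which is all that is still missing.
         */
        sg_init_marker(ctx->sg_data, MAX_SKB_FRAGS);
}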

kernel/bpf/sockmap.c

@@ -341,7 +341,7 @@ retry:
                 md->sg_start++;
                 if (md->sg_start == MAX_SKB_FRAGS)
                         md->sg_start = 0;
-                memset(sg, 0, sizeof(*sg));
+                sg_init_table(sg, 1);
 
                 if (md->sg_start == md->sg_end)
                         break;
@@ -843,7 +843,7 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         }
 
         sg = md.sg_data;
-        sg_init_table(sg, MAX_SKB_FRAGS);
+        sg_init_marker(sg, MAX_SKB_FRAGS);
         rcu_read_unlock();
 
         lock_sock(sk);
@@ -950,10 +950,14 @@ static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
 
         lock_sock(sk);
 
-        if (psock->cork_bytes)
+        if (psock->cork_bytes) {
                 m = psock->cork;
-        else
+                sg = &m->sg_data[m->sg_end];
+        } else {
                 m = &md;
+                sg = m->sg_data;
+                sg_init_marker(sg, MAX_SKB_FRAGS);
+        }
 
         /* Catch case where ring is full and sendpage is stalled. */
         if (unlikely(m->sg_end == m->sg_start &&
@@ -961,7 +965,6 @@ static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                 goto out_err;
 
         psock->sg_size += size;
-        sg = &m->sg_data[m->sg_end];
         sg_set_page(sg, page, size, offset);
         get_page(page);
         m->sg_copy[m->sg_end] = true;

lib/scatterlist.c

@@ -132,14 +132,7 @@ EXPORT_SYMBOL(sg_last);
 void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
         memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-        {
-                unsigned int i;
-                for (i = 0; i < nents; i++)
-                        sgl[i].sg_magic = SG_MAGIC;
-        }
-#endif
-        sg_mark_end(&sgl[nents - 1]);
+        sg_init_marker(sgl, nents);
 }
 EXPORT_SYMBOL(sg_init_table);