skmsg: convert struct sk_msg_sg::copy to a bitmap
We have plans for increasing MAX_SKB_FRAGS, but sk_msg_sg::copy is currently
an unsigned long, limiting MAX_SKB_FRAGS to 30 on 32bit arches.

Convert it to a bitmap, as Jakub suggested.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5a8fb33e53
parent d2692eee05
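Background, not part of the commit itself: DECLARE_BITMAP(name, bits) from <linux/types.h> expands to an unsigned long array with enough words to hold 'bits' bits, which is why the field keeps working once the frag count can exceed BITS_PER_LONG, and why the array (which decays to a pointer) is passed to the bit helpers directly instead of via '&'. Below is a minimal userspace sketch of those semantics; the MAX_MSG_FRAGS value and the demo_* helpers are made up for the demo, the real definitions live in skmsg.h and the kernel's bitops.

/*
 * Illustration only -- userspace sketch, not kernel code.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

#define MAX_MSG_FRAGS	48	/* hypothetical value, chosen only for the demo */

struct demo_sg {
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);	/* was: unsigned long copy; */
};

/* Simplified, non-atomic stand-ins for the kernel's bit helpers. */
static void demo_set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int demo_test_bit(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct demo_sg sg = { 0 };

	/* Bit 40 would not fit in a single unsigned long on a 32-bit arch,
	 * but it fits in the bitmap; the array is passed without '&'.
	 */
	demo_set_bit(40, sg.copy);
	printf("bit 40 = %d, bit 3 = %d\n",
	       demo_test_bit(40, sg.copy), demo_test_bit(3, sg.copy));
	return 0;
}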
@@ -29,7 +29,7 @@ struct sk_msg_sg {
 	u32 end;
 	u32 size;
 	u32 copybreak;
-	unsigned long copy;
+	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
 	/* The extra two elements:
 	 * 1) used for chaining the front and sections when the list becomes
 	 *    partitioned (e.g. end < start). The crypto APIs require the
@@ -38,7 +38,6 @@ struct sk_msg_sg {
 	 */
 	struct scatterlist data[MAX_MSG_FRAGS + 2];
 };
-static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
 
 /* UAPI in filter.c depends on struct sk_msg_sg being first element. */
 struct sk_msg {
@@ -234,7 +233,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
 {
 	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
 
-	if (test_bit(msg->sg.start, &msg->sg.copy)) {
+	if (test_bit(msg->sg.start, msg->sg.copy)) {
 		msg->data = NULL;
 		msg->data_end = NULL;
 	} else {
@@ -253,7 +252,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
 	sg_set_page(sge, page, len, offset);
 	sg_unmark_end(sge);
 
-	__set_bit(msg->sg.end, &msg->sg.copy);
+	__set_bit(msg->sg.end, msg->sg.copy);
 	msg->sg.size += len;
 	sk_msg_iter_next(msg, end);
 }
@@ -262,9 +261,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
 {
 	do {
 		if (copy_state)
-			__set_bit(i, &msg->sg.copy);
+			__set_bit(i, msg->sg.copy);
 		else
-			__clear_bit(i, &msg->sg.copy);
+			__clear_bit(i, msg->sg.copy);
 		sk_msg_iter_var_next(i);
 		if (i == msg->sg.end)
 			break;
@@ -2603,7 +2603,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
 	 * account for the headroom.
 	 */
 	bytes_sg_total = start - offset + bytes;
-	if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
+	if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len)
 		goto out;
 
 	/* At this point we need to linearize multiple scatterlist
@@ -2809,7 +2809,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
 	/* Place newly allocated data buffer */
 	sk_mem_charge(msg->sk, len);
 	msg->sg.size += len;
-	__clear_bit(new, &msg->sg.copy);
+	__clear_bit(new, msg->sg.copy);
 	sg_set_page(&msg->sg.data[new], page, len + copy, 0);
 	if (rsge.length) {
 		get_page(sg_page(&rsge));
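For context: the @@ -38,7 +38,6 @@ hunk drops static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS) because copy is now declared as a bitmap covering MAX_MSG_FRAGS + 2 bits, so it is sized for every frag index by construction rather than being limited to the width of a single unsigned long, and the guard is no longer needed.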