tls: Inline do_tcp_sendpages()
do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(),
so inline it, allowing do_tcp_sendpages() to be removed. This is part of
replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Boris Pismenny <borisp@nvidia.com>
cc: John Fastabend <john.fastabend@gmail.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit e117dcfd64
parent 7f8816ab4b
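For context, the conversion pattern the diff below applies can be sketched in
isolation. This is a minimal illustration, not code from the patch: the helper
name send_one_page() is hypothetical, and it assumes the caller already holds
the socket lock, as tls_push_sg() does.

/*
 * Minimal sketch (not part of the patch) of sending one page with
 * tcp_sendmsg_locked() and MSG_SPLICE_PAGES instead of do_tcp_sendpages():
 * wrap the page in a one-entry bio_vec and point msg_iter at it.
 * send_one_page() is a hypothetical helper used only for illustration.
 */
#include <linux/bvec.h>
#include <linux/uio.h>
#include <net/tcp.h>

static int send_one_page(struct sock *sk, struct page *page,
                         unsigned int offset, unsigned int size, int flags)
{
        struct bio_vec bvec;
        struct msghdr msg = {
                .msg_flags = MSG_SPLICE_PAGES | flags,
        };

        /* Describe the single page fragment to be spliced into the socket. */
        bvec_set_page(&bvec, page, size, offset);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

        /* Caller must hold the socket lock, as tls_push_sg() does. */
        return tcp_sendmsg_locked(sk, &msg, size);
}

In tls_push_sg() itself the same msghdr is reused across the loop, with
MSG_SENDPAGE_NOTLAST dropped from msg_flags for the final fragment, as the
hunks below show.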
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -258,7 +258,7 @@ struct tls_context {
         struct scatterlist *partially_sent_record;
         u16 partially_sent_offset;
 
-        bool in_tcp_sendpages;
+        bool splicing_pages;
         bool pending_open_record_frags;
 
         struct mutex tx_lock; /* protects partially_sent_* fields and
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -125,7 +125,10 @@ int tls_push_sg(struct sock *sk,
                 u16 first_offset,
                 int flags)
 {
-        int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
+        struct bio_vec bvec;
+        struct msghdr msg = {
+                .msg_flags = MSG_SENDPAGE_NOTLAST | MSG_SPLICE_PAGES | flags,
+        };
         int ret = 0;
         struct page *p;
         size_t size;
@@ -134,16 +137,19 @@ int tls_push_sg(struct sock *sk,
         size = sg->length - offset;
         offset += sg->offset;
 
-        ctx->in_tcp_sendpages = true;
+        ctx->splicing_pages = true;
         while (1) {
                 if (sg_is_last(sg))
-                        sendpage_flags = flags;
+                        msg.msg_flags = flags;
 
                 /* is sending application-limited? */
                 tcp_rate_check_app_limited(sk);
                 p = sg_page(sg);
 retry:
-                ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
+                bvec_set_page(&bvec, p, size, offset);
+                iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
+                ret = tcp_sendmsg_locked(sk, &msg, size);
 
                 if (ret != size) {
                         if (ret > 0) {
@@ -155,7 +161,7 @@ retry:
                         offset -= sg->offset;
                         ctx->partially_sent_offset = offset;
                         ctx->partially_sent_record = (void *)sg;
-                        ctx->in_tcp_sendpages = false;
+                        ctx->splicing_pages = false;
                         return ret;
                 }
 
@@ -169,7 +175,7 @@ retry:
                 size = sg->length;
         }
 
-        ctx->in_tcp_sendpages = false;
+        ctx->splicing_pages = false;
 
         return 0;
 }
@@ -247,11 +253,11 @@ static void tls_write_space(struct sock *sk)
 {
         struct tls_context *ctx = tls_get_ctx(sk);
 
-        /* If in_tcp_sendpages call lower protocol write space handler
+        /* If splicing_pages call lower protocol write space handler
          * to ensure we wake up any waiting operations there. For example
-         * if do_tcp_sendpages where to call sk_wait_event.
+         * if splicing pages where to call sk_wait_event.
          */
-        if (ctx->in_tcp_sendpages) {
+        if (ctx->splicing_pages) {
                 ctx->sk_write_space(sk);
                 return;
         }