// SPDX-License-Identifier: GPL-2.0
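
/*
 * Internal declarations for io_uring's networking requests: send/recv,
 * sendmsg/recvmsg, zero-copy send, shutdown, accept, socket and connect.
 */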
#include <linux/net.h>
#include <linux/uio.h>
#include "alloc_cache.h"
#if defined(CONFIG_NET)
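
/*
 * Async state for send/recv style requests: the copied msghdr, its
 * iovec(s) and the socket address are preserved here so a request can
 * be retried from io-wq. When unused, the allocation is recycled
 * through the io_uring alloc cache (hence the io_cache_entry member).
 */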
struct io_async_msghdr {
	union {
		struct iovec		fast_iov[UIO_FASTIOV];
		struct {
			struct iovec	fast_iov_one;
			__kernel_size_t	controllen;
			int		namelen;
			__kernel_size_t	payloadlen;
		};
		struct io_cache_entry	cache;
	};
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};
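
/*
 * io_connect() keeps a kernel copy of the destination address so an
 * async retry does not have to re-read it from user memory.
 */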
struct io_async_connect {
	struct sockaddr_storage		address;
};
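
/*
 * The handlers below follow the usual io_uring opcode split: *_prep()
 * validates and parses the SQE at submission time, the bare handler
 * issues the request, *_prep_async() sets up the async data needed when
 * a request is punted to io-wq, *_cleanup() frees per-request
 * resources, and *_fail() finishes a request that is being failed.
 */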
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);

int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_send_prep_async(struct io_kiocb *req);

int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);
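
/* Shared ->fail() handler for the send and receive opcodes. */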
void io_sendrecv_fail(struct io_kiocb *req);

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);
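
/*
 * Zero-copy send (IORING_OP_SEND_ZC): payload pages are pinned instead
 * of copied, and a separate notification completion signals when the
 * buffer can be reused.
 */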
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_sendzc_cleanup(struct io_kiocb *req);
void io_send_zc_fail(struct io_kiocb *req);
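
/* Frees an io_async_msghdr that was parked in the io_uring alloc cache. */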
void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
{
}
#endif