io_uring-6.0-2022-08-13
Merge tag 'io_uring-6.0-2022-08-13' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Regression fix for this merge window, fixing a wrong order of
   arguments for io_req_set_res() for passthru (Dylan)

 - Fix for the audit code leaking context memory (Peilin)

 - Ensure that provided buffers are memcg accounted (Pavel)

 - Correctly handle short zero-copy sends (Pavel)

 - Sparse warning fixes for the recvmsg multishot command (Dylan)

 - Error handling fix for passthru (Anuj)

 - Remove randomization of struct kiocb fields, which could grow the
   struct if the fields were re-arranged in a way that added holes or
   padding (Keith, Linus)

 - Small series improving type safety of the sqe fields (Stefan)

* tag 'io_uring-6.0-2022-08-13' of git://git.kernel.dk/linux-block:
  io_uring: add missing BUILD_BUG_ON() checks for new io_uring_sqe fields
  io_uring: make io_kiocb_to_cmd() typesafe
  fs: don't randomize struct kiocb fields
  io_uring: consistently make use of io_notif_to_data()
  io_uring: fix error handling for io_uring_cmd
  io_uring: fix io_recvmsg_prep_multishot sparse warnings
  io_uring/net: send retry for zerocopy
  io_uring: mem-account pbuf buckets
  audit, io_uring, io-wq: Fix memory leak in io_sq_thread() and io_wqe_worker()
  io_uring: pass correct parameters to io_req_set_res
commit 1da8cf961b
@@ -285,7 +285,6 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
 /* These are defined in auditsc.c */
 				/* Public API */
 extern int  audit_alloc(struct task_struct *task);
-extern int  audit_alloc_kernel(struct task_struct *task);
 extern void __audit_free(struct task_struct *task);
 extern void __audit_uring_entry(u8 op);
 extern void __audit_uring_exit(int success, long code);
@@ -578,10 +577,6 @@ static inline int audit_alloc(struct task_struct *task)
 {
 	return 0;
 }
-static inline int audit_alloc_kernel(struct task_struct *task)
-{
-	return 0;
-}
 static inline void audit_free(struct task_struct *task)
 { }
 static inline void audit_uring_entry(u8 op)
@@ -340,17 +340,12 @@ enum rw_hint {
 struct kiocb {
 	struct file		*ki_filp;
-
-	/* The 'ki_filp' pointer is shared in a union for aio */
-	randomized_struct_fields_start
-
 	loff_t			ki_pos;
 	void (*ki_complete)(struct kiocb *iocb, long ret);
 	void			*private;
 	int			ki_flags;
 	u16			ki_ioprio; /* See linux/ioprio.h */
 	struct wait_page_queue	*ki_waitq; /* for async buffered IO */
-	randomized_struct_fields_end
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
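Why de-randomization matters here: struct kiocb is embedded in every read/write request, and a randomized field order can insert alignment padding between members, growing the struct. A quick standalone illustration of order-dependent padding (sizes assume a typical LP64 ABI; the struct names are invented for the example):

#include <stdio.h>

/* Poor ordering: each small field is followed by an 8-byte-aligned
 * pointer, so the compiler inserts padding after `a` and `b`. */
struct bad {
	int   a;	/* 4 bytes + 4 bytes padding */
	void *p;	/* 8 bytes */
	short b;	/* 2 bytes + 6 bytes padding */
	void *q;	/* 8 bytes */
};

/* Same fields grouped by size: far less padding. */
struct good {
	void *p;
	void *q;
	int   a;
	short b;	/* 2 bytes + 2 bytes tail padding */
};

int main(void)
{
	printf("bad: %zu bytes, good: %zu bytes\n",
	       sizeof(struct bad), sizeof(struct good));	/* 32 vs 24 */
	return 0;
}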
@@ -491,7 +491,14 @@ struct io_cmd_data {
 	__u8		data[56];
 };
 
-#define io_kiocb_to_cmd(req)	((void *) &(req)->cmd)
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+	((cmd_type *)&(req)->cmd) \
+)
 #define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
 
 struct io_kiocb {
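The type-safety trick above is the comma operator: the first operand performs a compile-time size check, the second yields the typed pointer, so casting to an oversized command struct breaks the build instead of silently corrupting neighbouring fields. A minimal userspace sketch of the same pattern (names are illustrative; the kernel routes the check through an inline helper because its BUILD_BUG_ON() is a statement, while this sketch uses an expression-form check):

#include <stdio.h>
#include <stddef.h>

struct cmd_data { unsigned char data[56]; };	/* mirrors io_cmd_data */
struct req { struct cmd_data cmd; };		/* mirrors io_kiocb */

/* Expression-form compile-time check: a negative array size is a
 * compile error whenever cmd_type is larger than the storage. */
#define CMD_FITS(cmd_type) \
	((void)sizeof(char[1 - 2 * (sizeof(cmd_type) > sizeof(struct cmd_data))]))

/* Comma expression: run the check, then hand back a typed pointer. */
#define req_to_cmd(req, cmd_type) \
	(CMD_FITS(cmd_type), ((cmd_type *)&(req)->cmd))

struct small_op { int fd; long off; };	/* fits in 56 bytes: compiles */

int main(void)
{
	static struct req r;	/* zero-initialized */
	struct small_op *op = req_to_cmd(&r, struct small_op);

	op->fd = 1;
	printf("fd=%d\n", op->fd);
	/* Passing a struct larger than 56 bytes here would not compile. */
	return 0;
}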
@@ -31,7 +31,7 @@ struct io_madvise {
 int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-	struct io_madvise *ma = io_kiocb_to_cmd(req);
+	struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
 
 	if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
 		return -EINVAL;
@@ -48,7 +48,7 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-	struct io_madvise *ma = io_kiocb_to_cmd(req);
+	struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -64,7 +64,7 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_fadvise *fa = io_kiocb_to_cmd(req);
+	struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
 
 	if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
 		return -EINVAL;
@@ -77,7 +77,7 @@ int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_fadvise *fa = io_kiocb_to_cmd(req);
+	struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK) {
@@ -107,7 +107,7 @@ int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
 
 int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_cancel *cancel = io_kiocb_to_cmd(req);
+	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
 
 	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
@@ -164,7 +164,7 @@ static int __io_async_cancel(struct io_cancel_data *cd,
 
 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_cancel *cancel = io_kiocb_to_cmd(req);
+	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
 	struct io_cancel_data cd = {
 		.ctx = req->ctx,
 		.data = cancel->addr,
@@ -23,7 +23,7 @@ struct io_epoll {
 
 int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_epoll *epoll = io_kiocb_to_cmd(req);
+	struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
 
 	pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
 		     "be removed in a future Linux kernel version.\n",
@@ -49,7 +49,7 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_epoll *ie = io_kiocb_to_cmd(req);
+	struct io_epoll *ie = io_kiocb_to_cmd(req, struct io_epoll);
 	int ret;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -49,7 +49,7 @@ struct io_link {
 
 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_rename *ren = io_kiocb_to_cmd(req);
+	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
 	const char __user *oldf, *newf;
 
 	if (sqe->buf_index || sqe->splice_fd_in)
@@ -79,7 +79,7 @@ int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rename *ren = io_kiocb_to_cmd(req);
+	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -95,7 +95,7 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_renameat_cleanup(struct io_kiocb *req)
 {
-	struct io_rename *ren = io_kiocb_to_cmd(req);
+	struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
 
 	putname(ren->oldpath);
 	putname(ren->newpath);
@@ -103,7 +103,7 @@ void io_renameat_cleanup(struct io_kiocb *req)
 
 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_unlink *un = io_kiocb_to_cmd(req);
+	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
 	const char __user *fname;
 
 	if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
@@ -128,7 +128,7 @@ int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_unlink *un = io_kiocb_to_cmd(req);
+	struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -146,14 +146,14 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_unlinkat_cleanup(struct io_kiocb *req)
 {
-	struct io_unlink *ul = io_kiocb_to_cmd(req);
+	struct io_unlink *ul = io_kiocb_to_cmd(req, struct io_unlink);
 
 	putname(ul->filename);
 }
 
 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_mkdir *mkd = io_kiocb_to_cmd(req);
+	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
 	const char __user *fname;
 
 	if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -175,7 +175,7 @@ int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_mkdir *mkd = io_kiocb_to_cmd(req);
+	struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -190,14 +190,14 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_mkdirat_cleanup(struct io_kiocb *req)
 {
-	struct io_mkdir *md = io_kiocb_to_cmd(req);
+	struct io_mkdir *md = io_kiocb_to_cmd(req, struct io_mkdir);
 
 	putname(md->filename);
 }
 
 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_link *sl = io_kiocb_to_cmd(req);
+	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
 	const char __user *oldpath, *newpath;
 
 	if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -225,7 +225,7 @@ int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_link *sl = io_kiocb_to_cmd(req);
+	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -240,7 +240,7 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_link *lnk = io_kiocb_to_cmd(req);
+	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
 	const char __user *oldf, *newf;
 
 	if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
@@ -270,7 +270,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_link *lnk = io_kiocb_to_cmd(req);
+	struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -286,7 +286,7 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_link_cleanup(struct io_kiocb *req)
 {
-	struct io_link *sl = io_kiocb_to_cmd(req);
+	struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
 
 	putname(sl->oldpath);
 	putname(sl->newpath);
@@ -624,8 +624,6 @@ static int io_wqe_worker(void *data)
 	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
 	set_task_comm(current, buf);
 
-	audit_alloc_kernel(current);
-
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 		long ret;
 
@@ -660,7 +658,6 @@ static int io_wqe_worker(void *data)
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 		io_worker_handle_work(worker);
 
-	audit_free(current);
 	io_worker_exit(worker);
 	return 0;
 }
@@ -3885,13 +3885,15 @@ out_fput:
 
 static int __init io_uring_init(void)
 {
-#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
+#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
-	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
+	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
 } while (0)
 
 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
-	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
+	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
+#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
+	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
@@ -3899,6 +3901,8 @@ static int __init io_uring_init(void)
 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
+	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
+	BUILD_BUG_SQE_ELEM(12, __u32,  __pad1);
 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
@@ -3917,13 +3921,22 @@ static int __init io_uring_init(void)
 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
+	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
+	BUILD_BUG_SQE_ELEM(44, __u16,  notification_idx);
+	BUILD_BUG_SQE_ELEM(46, __u16,  addr_len);
+	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
+	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
 
 	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
 		     sizeof(struct io_uring_rsrc_update));
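The BUILD_BUG_SQE_ELEM() checks above pin each field of the userspace-visible SQE to a fixed offset and size, so an accidental reshuffle breaks the build rather than the ABI. A freestanding C11 sketch of the same idea (the struct and its fields are invented for illustration, not io_uring's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A fixed-layout structure shared with another component. */
struct wire_sqe {
	uint8_t  opcode;	/* offset 0 */
	uint8_t  flags;		/* offset 1 */
	uint16_t ioprio;	/* offset 2 */
	int32_t  fd;		/* offset 4 */
	uint64_t off;		/* offset 8 */
};

/* Compile-time pin: field `ename` sits at `eoffset` with size `esize`. */
#define CHECK_FIELD(stype, eoffset, esize, ename) \
	_Static_assert(offsetof(stype, ename) == (eoffset) && \
		       sizeof(((stype *)0)->ename) == (esize), \
		       "layout of " #ename " changed")

CHECK_FIELD(struct wire_sqe, 0, 1, opcode);
CHECK_FIELD(struct wire_sqe, 1, 1, flags);
CHECK_FIELD(struct wire_sqe, 2, 2, ioprio);
CHECK_FIELD(struct wire_sqe, 4, 4, fd);
CHECK_FIELD(struct wire_sqe, 8, 8, off);

int main(void)
{
	printf("wire_sqe is %zu bytes\n", sizeof(struct wire_sqe));
	return 0;
}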
@@ -272,7 +272,7 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_provide_buf *p = io_kiocb_to_cmd(req);
+	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
 	u64 tmp;
 
 	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
@@ -291,7 +291,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_provide_buf *p = io_kiocb_to_cmd(req);
+	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
@@ -319,7 +319,7 @@ int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	unsigned long size, tmp_check;
-	struct io_provide_buf *p = io_kiocb_to_cmd(req);
+	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
 	u64 tmp;
 
 	if (sqe->rw_flags || sqe->splice_fd_in)
@@ -421,7 +421,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
 
 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_provide_buf *p = io_kiocb_to_cmd(req);
+	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
@@ -436,7 +436,7 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
-		bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
 		if (!bl) {
 			ret = -ENOMEM;
 			goto err;
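The one-flag change in the last hunk matters because these buffer-list buckets are allocated on behalf of userspace and can be created in large numbers: GFP_KERNEL_ACCOUNT charges the allocation to the requesting task's memory cgroup. A hedged kernel-style sketch of the convention (not the io_uring code itself; struct and function names invented):

#include <linux/slab.h>
#include <linux/list.h>

struct user_bucket {
	struct list_head list;
	unsigned int id;
};

static struct user_bucket *bucket_create(unsigned int id)
{
	struct user_bucket *b;

	/* GFP_KERNEL_ACCOUNT == GFP_KERNEL | __GFP_ACCOUNT: the allocation
	 * is accounted to the current task's memcg, so userspace cannot
	 * escape its memory limits by creating many such objects. */
	b = kzalloc(sizeof(*b), GFP_KERNEL_ACCOUNT);
	if (!b)
		return NULL;
	b->id = id;
	INIT_LIST_HEAD(&b->list);
	return b;
}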
@@ -26,7 +26,7 @@ struct io_msg {
 static int io_msg_ring_data(struct io_kiocb *req)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
-	struct io_msg *msg = io_kiocb_to_cmd(req);
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
@@ -76,7 +76,7 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx,
 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
-	struct io_msg *msg = io_kiocb_to_cmd(req);
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long file_ptr;
 	struct file *src_file;
@@ -122,7 +122,7 @@ out_unlock:
 
 int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_msg *msg = io_kiocb_to_cmd(req);
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 
 	if (unlikely(sqe->buf_index || sqe->personality))
 		return -EINVAL;
@@ -141,7 +141,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_msg *msg = io_kiocb_to_cmd(req);
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 	int ret;
 
 	ret = -EBADFD;
@@ -70,13 +70,14 @@ struct io_sendzc {
 	unsigned			flags;
 	unsigned			addr_len;
 	void __user			*addr;
+	size_t				done_io;
 };
 
 #define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
 
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
+	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
 
 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
 		     sqe->buf_index || sqe->splice_fd_in))
@@ -88,7 +89,7 @@ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
+	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
 	struct socket *sock;
 	int ret;
 
@@ -173,7 +174,7 @@ static int io_setup_async_msg(struct io_kiocb *req,
 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 			       struct io_async_msghdr *iomsg)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	iomsg->msg.msg_name = &iomsg->addr;
 	iomsg->free_iov = iomsg->fast_iov;
@@ -200,7 +201,7 @@ void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
 
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -224,7 +225,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
@@ -283,7 +284,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_send(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct msghdr msg;
 	struct iovec iov;
 	struct socket *sock;
@@ -357,7 +358,7 @@ static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 				 struct io_async_msghdr *iomsg)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct user_msghdr msg;
 	int ret;
 
@@ -404,7 +405,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 					struct io_async_msghdr *iomsg)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct compat_msghdr msg;
 	struct compat_iovec __user *uiov;
 	int ret;
@@ -482,7 +483,7 @@ int io_recvmsg_prep_async(struct io_kiocb *req)
 
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	if (unlikely(sqe->file_index || sqe->addr2))
 		return -EINVAL;
@@ -517,7 +518,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 static inline void io_recv_prep_retry(struct io_kiocb *req)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
 	sr->done_io = 0;
 	sr->len = 0; /* get from the provided buffer */
@@ -575,12 +576,12 @@ static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
 	if (kmsg->controllen) {
 		unsigned long control = ubuf + hdr - kmsg->controllen;
 
-		kmsg->msg.msg_control_user = (void *) control;
+		kmsg->msg.msg_control_user = (void __user *) control;
 		kmsg->msg.msg_controllen = kmsg->controllen;
 	}
 
 	sr->buf = *buf; /* stash for later copy */
-	*buf = (void *) (ubuf + hdr);
+	*buf = (void __user *) (ubuf + hdr);
 	kmsg->payloadlen = *len = *len - hdr;
 	return 0;
 }
@@ -646,7 +647,7 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned int cflags;
@@ -758,7 +759,7 @@ retry_multishot:
 
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct msghdr msg;
 	struct socket *sock;
 	struct iovec iov;
@@ -849,7 +850,7 @@ out_free:
 
 int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sendzc *zc = io_kiocb_to_cmd(req);
+	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
@@ -878,6 +879,7 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
 	zc->addr_len = READ_ONCE(sqe->addr_len);
+	zc->done_io = 0;
 
 #ifdef CONFIG_COMPAT
 	if (req->ctx->compat)
@@ -944,7 +946,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct sockaddr_storage address;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_sendzc *zc = io_kiocb_to_cmd(req);
+	struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
 	struct io_notif_slot *notif_slot;
 	struct io_kiocb *notif;
 	struct msghdr msg;
@@ -1012,18 +1014,30 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(ret < min_ret)) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 			return -EAGAIN;
-		return ret == -ERESTARTSYS ? -EINTR : ret;
-	} else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
-		io_notif_slot_flush_submit(notif_slot, 0);
+		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
+			zc->len -= ret;
+			zc->buf += ret;
+			zc->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return -EAGAIN;
+		}
+		if (ret == -ERESTARTSYS)
+			ret = -EINTR;
 	}
 
+	if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH)
+		io_notif_slot_flush_submit(notif_slot, 0);
+	if (ret >= 0)
+		ret += zc->done_io;
+	else if (zc->done_io)
+		ret = zc->done_io;
 	io_req_set_res(req, ret, 0);
 	return IOU_OK;
 }
 
 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_accept *accept = io_kiocb_to_cmd(req);
+	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
 	unsigned flags;
 
 	if (sqe->len || sqe->buf_index)
@@ -1057,7 +1071,7 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_accept *accept = io_kiocb_to_cmd(req);
+	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
 	bool fixed = !!accept->file_slot;
@@ -1115,7 +1129,7 @@ retry:
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_socket *sock = io_kiocb_to_cmd(req);
+	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
 
 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
@@ -1136,7 +1150,7 @@ int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_socket *sock = io_kiocb_to_cmd(req);
+	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
 	bool fixed = !!sock->file_slot;
 	struct file *file;
 	int ret, fd;
@@ -1170,14 +1184,14 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags)
 int io_connect_prep_async(struct io_kiocb *req)
 {
 	struct io_async_connect *io = req->async_data;
-	struct io_connect *conn = io_kiocb_to_cmd(req);
+	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
 
 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
 }
 
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_connect *conn = io_kiocb_to_cmd(req);
+	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
 
 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;
@@ -1189,7 +1203,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_connect *connect = io_kiocb_to_cmd(req);
+	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
 	struct io_async_connect __io, *io;
 	unsigned file_flags;
 	int ret;
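The zerocopy retry logic above is a general pattern for restartable I/O: on a short transfer, advance the buffer, remember how much was done, and report the accumulated total when the operation finally completes or fails. A minimal userspace analogue over write(2) (names illustrative, not the kernel's):

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Per-request bookkeeping, analogous to zc->buf/zc->len/zc->done_io. */
struct send_state {
	const char *buf;
	size_t len;
	size_t done_io;	/* bytes already transferred across retries */
};

/* Returns total bytes sent on completion, -1 on error with errno set.
 * On a short write it advances the state and retries, mirroring how
 * io_sendzc() stashes progress and returns -EAGAIN to be re-issued. */
ssize_t send_all(int fd, struct send_state *s)
{
	while (s->len) {
		ssize_t ret = write(fd, s->buf, s->len);

		if (ret < 0) {
			if (errno == EINTR)
				continue;	/* transparent restart */
			/* report partial progress if any, like done_io */
			return s->done_io ? (ssize_t)s->done_io : -1;
		}
		s->buf += ret;
		s->len -= (size_t)ret;
		s->done_io += (size_t)ret;
	}
	return (ssize_t)s->done_io;
}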
@@ -100,7 +100,7 @@ __cold int io_notif_unregister(struct io_ring_ctx *ctx)
 
 		if (!notif)
 			continue;
-		nd = io_kiocb_to_cmd(notif);
+		nd = io_notif_to_data(notif);
 		slot->notif = NULL;
 		if (!refcount_dec_and_test(&nd->uarg.refcnt))
 			continue;
@@ -123,8 +123,6 @@ __cold int io_notif_register(struct io_ring_ctx *ctx,
 	struct io_uring_notification_register reg;
 	unsigned i;
 
-	BUILD_BUG_ON(sizeof(struct io_notif_data) > 64);
-
 	if (ctx->nr_notif_slots)
 		return -EBUSY;
 	if (size != sizeof(reg))
@@ -46,7 +46,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
 
 static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
 {
-	return io_kiocb_to_cmd(notif);
+	return io_kiocb_to_cmd(notif, struct io_notif_data);
 }
 
 static inline struct io_kiocb *io_get_notif(struct io_ring_ctx *ctx,
@@ -33,7 +33,7 @@ struct io_close {
 
 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_open *open = io_kiocb_to_cmd(req);
+	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
 	const char __user *fname;
 	int ret;
 
@@ -66,7 +66,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 
 int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_open *open = io_kiocb_to_cmd(req);
+	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
 	u64 mode = READ_ONCE(sqe->len);
 	u64 flags = READ_ONCE(sqe->open_flags);
 
@@ -76,7 +76,7 @@ int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_open *open = io_kiocb_to_cmd(req);
+	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
 	struct open_how __user *how;
 	size_t len;
 	int ret;
@@ -95,7 +95,7 @@ int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_open *open = io_kiocb_to_cmd(req);
+	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
 	struct open_flags op;
 	struct file *file;
 	bool resolve_nonblock, nonblock_set;
@@ -167,7 +167,7 @@ int io_openat(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_open_cleanup(struct io_kiocb *req)
 {
-	struct io_open *open = io_kiocb_to_cmd(req);
+	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
 
 	if (open->filename)
 		putname(open->filename);
@@ -187,14 +187,14 @@ int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
 
 static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_close *close = io_kiocb_to_cmd(req);
+	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
 
 	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
 }
 
 int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_close *close = io_kiocb_to_cmd(req);
+	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
 
 	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
@@ -212,7 +212,7 @@ int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 int io_close(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct files_struct *files = current->files;
-	struct io_close *close = io_kiocb_to_cmd(req);
+	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
 	struct fdtable *fdt;
 	struct file *file;
 	int ret = -EBADF;
@@ -85,7 +85,7 @@ static struct io_poll *io_poll_get_double(struct io_kiocb *req)
 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 {
 	if (req->opcode == IORING_OP_POLL_ADD)
-		return io_kiocb_to_cmd(req);
+		return io_kiocb_to_cmd(req, struct io_poll);
 	return &req->apoll->poll;
 }
 
@@ -274,7 +274,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 		return;
 
 	if (ret == IOU_POLL_DONE) {
-		struct io_poll *poll = io_kiocb_to_cmd(req);
+		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
 		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
 	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
 		req->cqe.res = ret;
@@ -475,7 +475,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 			       struct poll_table_struct *p)
 {
 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
-	struct io_poll *poll = io_kiocb_to_cmd(pt->req);
+	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
 
 	__io_queue_proc(poll, pt, head,
 			(struct io_poll **) &pt->req->async_data);
@@ -821,7 +821,7 @@ static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
 
 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_poll_update *upd = io_kiocb_to_cmd(req);
+	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
 	u32 flags;
 
 	if (sqe->buf_index || sqe->splice_fd_in)
@@ -851,7 +851,7 @@ int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_poll *poll = io_kiocb_to_cmd(req);
+	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
 	u32 flags;
 
 	if (sqe->buf_index || sqe->off || sqe->addr)
@@ -868,7 +868,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_poll *poll = io_kiocb_to_cmd(req);
+	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
 	struct io_poll_table ipt;
 	int ret;
 
@@ -891,7 +891,7 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
+	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
 	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_hash_bucket *bucket;
@@ -930,7 +930,7 @@ found:
 	if (poll_update->update_events || poll_update->update_user_data) {
 		/* only mask one event flags, keep behavior flags */
 		if (poll_update->update_events) {
-			struct io_poll *poll = io_kiocb_to_cmd(preq);
+			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
 
 			poll->events &= ~0xffff;
 			poll->events |= poll_update->events & 0xffff;
@@ -657,7 +657,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 
 int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
@@ -676,7 +676,7 @@ int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int io_files_update_with_index_alloc(struct io_kiocb *req,
 					    unsigned int issue_flags)
 {
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 	__s32 __user *fds = u64_to_user_ptr(up->arg);
 	unsigned int done;
 	struct file *file;
@@ -714,7 +714,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
 
 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_uring_rsrc_update2 up2;
 	int ret;
@@ -743,7 +743,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned len = up->nr_args;
 	unsigned idx_end, idx = up->offset;
@@ -778,7 +778,7 @@ out:
 
 int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rsrc_update *up = io_kiocb_to_cmd(req);
+	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
 
 	switch (up->type) {
 	case IORING_RSRC_UPDATE_FILES:
@@ -35,7 +35,7 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
 
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	unsigned ioprio;
 	int ret;
 
@@ -102,7 +102,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
 	if (rw->kiocb.ki_pos != -1)
 		return &rw->kiocb.ki_pos;
@@ -186,7 +186,7 @@ static void kiocb_end_write(struct io_kiocb *req)
 
 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
 		kiocb_end_write(req);
@@ -241,7 +241,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		      unsigned int issue_flags)
 {
 	struct io_async_rw *io = req->async_data;
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
 	/* add previously done IO, if any */
 	if (req_has_async_data(req) && io->bytes_done > 0) {
@@ -277,7 +277,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 				unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct compat_iovec __user *uiov;
 	compat_ssize_t clen;
 	void __user *buf;
@@ -305,7 +305,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 				      unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
 	void __user *buf;
 	ssize_t len;
@@ -328,7 +328,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 				    unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
 		iov[0].iov_base = u64_to_user_ptr(rw->addr);
@@ -350,7 +350,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 				       struct io_rw_state *s,
 				       unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct iov_iter *iter = &s->iter;
 	u8 opcode = req->opcode;
 	struct iovec *iovec;
@@ -571,7 +571,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 {
 	struct wait_page_queue *wpq;
 	struct io_kiocb *req = wait->private;
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct wait_page_key *key = arg;
 
 	wpq = container_of(wait, struct wait_page_queue, wait);
@@ -601,7 +601,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 {
 	struct io_async_rw *io = req->async_data;
 	struct wait_page_queue *wait = &io->wpq;
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct kiocb *kiocb = &rw->kiocb;
 
 	/* never retry for NOWAIT, we just complete with -EAGAIN */
@@ -649,7 +649,7 @@ static bool need_complete_io(struct io_kiocb *req)
 
 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct kiocb *kiocb = &rw->kiocb;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct file *file = req->file;
@@ -694,7 +694,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
 
 int io_read(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct io_rw_state __s, *s = &__s;
 	struct iovec *iovec;
 	struct kiocb *kiocb = &rw->kiocb;
@@ -839,7 +839,7 @@ done:
 
 int io_write(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req);
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct io_rw_state __s, *s = &__s;
 	struct iovec *iovec;
 	struct kiocb *kiocb = &rw->kiocb;
@@ -994,7 +994,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 
 	wq_list_for_each(pos, start, &ctx->iopoll_list) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
-		struct io_rw *rw = io_kiocb_to_cmd(req);
+		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 		int ret;
 
 		/*
@@ -26,7 +26,7 @@ struct io_splice {
 static int __io_splice_prep(struct io_kiocb *req,
 			    const struct io_uring_sqe *sqe)
 {
-	struct io_splice *sp = io_kiocb_to_cmd(req);
+	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
 	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
 
 	sp->len = READ_ONCE(sqe->len);
@@ -46,7 +46,7 @@ int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_splice *sp = io_kiocb_to_cmd(req);
+	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
 	struct file *out = sp->file_out;
 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
 	struct file *in;
@@ -78,7 +78,7 @@ done:
 
 int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_splice *sp = io_kiocb_to_cmd(req);
+	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
 
 	sp->off_in = READ_ONCE(sqe->splice_off_in);
 	sp->off_out = READ_ONCE(sqe->off);
@@ -87,7 +87,7 @@ int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_splice *sp = io_kiocb_to_cmd(req);
+	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
 	struct file *out = sp->file_out;
 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
 	loff_t *poff_in, *poff_out;
@@ -235,8 +235,6 @@ static int io_sq_thread(void *data)
 		set_cpus_allowed_ptr(current, cpu_online_mask);
 	current->flags |= PF_NO_SETAFFINITY;
 
-	audit_alloc_kernel(current);
-
 	mutex_lock(&sqd->lock);
 	while (1) {
 		bool cap_entries, sqt_spin = false;
@@ -310,8 +308,6 @@ static int io_sq_thread(void *data)
 	io_run_task_work();
 	mutex_unlock(&sqd->lock);
 
-	audit_free(current);
-
 	complete(&sqd->exited);
 	do_exit(0);
 }
@@ -22,7 +22,7 @@ struct io_statx {
 
 int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_statx *sx = io_kiocb_to_cmd(req);
+	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
 	const char __user *path;
 
 	if (sqe->buf_index || sqe->splice_fd_in)
@@ -53,7 +53,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_statx *sx = io_kiocb_to_cmd(req);
+	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -66,7 +66,7 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags)
 
 void io_statx_cleanup(struct io_kiocb *req)
 {
-	struct io_statx *sx = io_kiocb_to_cmd(req);
+	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
 
 	if (sx->filename)
 		putname(sx->filename);
@@ -24,7 +24,7 @@ struct io_sync {
 
 int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 
 	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
 		return -EINVAL;
@@ -37,7 +37,7 @@ int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 	int ret;
 
 	/* sync_file_range always requires a blocking context */
@@ -51,7 +51,7 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 
 	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
 		return -EINVAL;
@@ -67,7 +67,7 @@ int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 	loff_t end = sync->off + sync->len;
 	int ret;
 
@@ -83,7 +83,7 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 
 	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
 		return -EINVAL;
@@ -96,7 +96,7 @@ int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_sync *sync = io_kiocb_to_cmd(req);
+	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
 	int ret;
 
 	/* fallocate always requiring blocking context */
@@ -36,7 +36,7 @@ struct io_timeout_rem {
 
 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
 {
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 
 	return !timeout->off;
 }
@@ -56,7 +56,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 	struct io_timeout_data *io = req->async_data;
 
 	if (hrtimer_try_to_cancel(&io->timer) != -1) {
-		struct io_timeout *timeout = io_kiocb_to_cmd(req);
+		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 
 		if (status)
 			req_set_fail(req);
@@ -188,7 +188,7 @@ struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
 	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_timeout_data *io = link->async_data;
-	struct io_timeout *timeout = io_kiocb_to_cmd(link);
+	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);
 
 	io_remove_next_linked(req);
 	timeout->head = NULL;
@@ -205,7 +205,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	struct io_timeout_data *data = container_of(timer,
 						struct io_timeout_data, timer);
 	struct io_kiocb *req = data->req;
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
@@ -252,7 +252,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 	io = req->async_data;
 	if (hrtimer_try_to_cancel(&io->timer) == -1)
 		return ERR_PTR(-EALREADY);
-	timeout = io_kiocb_to_cmd(req);
+	timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	list_del_init(&timeout->list);
 	return req;
 }
@@ -275,7 +275,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
 	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_kiocb *prev = timeout->prev;
 	int ret = -ENOENT;
 
@@ -302,7 +302,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	struct io_timeout_data *data = container_of(timer,
 						struct io_timeout_data, timer);
 	struct io_kiocb *prev, *req = data->req;
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
@@ -378,7 +378,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
 {
 	struct io_cancel_data cd = { .data = user_data, };
 	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_timeout_data *data;
 
 	if (IS_ERR(req))
@@ -395,7 +395,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
 
 int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_timeout_rem *tr = io_kiocb_to_cmd(req);
+	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
 
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
@@ -435,7 +435,7 @@ static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
  */
 int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_timeout_rem *tr = io_kiocb_to_cmd(req);
+	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
@@ -466,7 +466,7 @@ static int __io_timeout_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe,
 			     bool is_timeout_link)
 {
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_timeout_data *data;
 	unsigned flags;
 	u32 off = READ_ONCE(sqe->off);
@@ -532,7 +532,7 @@ int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data = req->async_data;
 	struct list_head *entry;
@@ -583,7 +583,7 @@ add:
 
 void io_queue_linked_timeout(struct io_kiocb *req)
 {
-	struct io_timeout *timeout = io_kiocb_to_cmd(req);
+	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 
 	spin_lock_irq(&ctx->timeout_lock);
@@ -11,7 +11,7 @@
 
 static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
 {
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
 	ioucmd->task_work_cb(ioucmd);
 }
@@ -46,7 +46,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
 	if (ret < 0)
 		req_set_fail(req);
 
-	io_req_set_res(req, 0, ret);
+	io_req_set_res(req, ret, 0);
 	if (req->ctx->flags & IORING_SETUP_CQE32)
 		io_req_set_cqe32_extra(req, res2, 0);
 	__io_req_complete(req, 0);
@@ -55,9 +55,12 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
 int io_uring_cmd_prep_async(struct io_kiocb *req)
 {
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 	size_t cmd_size;
 
+	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
+	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);
+
 	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
 
 	memcpy(req->async_data, ioucmd->cmd, cmd_size);
@@ -66,7 +69,7 @@ int io_uring_cmd_prep_async(struct io_kiocb *req)
 
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
 	if (sqe->rw_flags || sqe->__pad1)
 		return -EINVAL;
@@ -77,7 +80,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req);
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 	struct io_ring_ctx *ctx = req->ctx;
 	struct file *file = req->file;
 	int ret;
@@ -106,7 +109,9 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	if (ret != -EIOCBQUEUED) {
-		io_uring_cmd_done(ioucmd, ret, 0);
+		if (ret < 0)
+			req_set_fail(req);
+		io_req_set_res(req, ret, 0);
 		return IOU_OK;
 	}
 
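The io_req_set_res() fix above is worth spelling out: the result must be the second argument and the CQE flags the third, and the earlier code had them swapped, so passthrough completions always reported a result of 0. A toy illustration of the failure mode (function shapes assumed for the example, not the kernel's exact signatures):

#include <stdio.h>

struct cqe { int res; unsigned flags; };

/* Same shape as io_req_set_res(req, res, cflags): two easily-swapped
 * integer parameters, which is exactly how the bug slipped in. */
static void set_res(struct cqe *cqe, int res, unsigned cflags)
{
	cqe->res = res;
	cqe->flags = cflags;
}

int main(void)
{
	struct cqe cqe;
	int ret = 42;	/* the value the operation actually produced */

	set_res(&cqe, 0, ret);	/* buggy order: completion reports 0 */
	printf("buggy: res=%d\n", cqe.res);

	set_res(&cqe, ret, 0);	/* fixed order: completion reports 42 */
	printf("fixed: res=%d\n", cqe.res);
	return 0;
}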
@@ -24,7 +24,7 @@ struct io_xattr {
 
 void io_xattr_cleanup(struct io_kiocb *req)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 
 	if (ix->filename)
 		putname(ix->filename);
@@ -44,7 +44,7 @@ static void io_xattr_finish(struct io_kiocb *req, int ret)
 static int __io_getxattr_prep(struct io_kiocb *req,
 			      const struct io_uring_sqe *sqe)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	const char __user *name;
 	int ret;
 
@@ -85,7 +85,7 @@ int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	const char __user *path;
 	int ret;
 
@@ -106,7 +106,7 @@ int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	int ret;
 
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -122,7 +122,7 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	unsigned int lookup_flags = LOOKUP_FOLLOW;
 	struct path path;
 	int ret;
@@ -151,7 +151,7 @@ retry:
 static int __io_setxattr_prep(struct io_kiocb *req,
 			      const struct io_uring_sqe *sqe)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	const char __user *name;
 	int ret;
 
@@ -181,7 +181,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
 
 int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	const char __user *path;
 	int ret;
 
@@ -208,7 +208,7 @@ int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
 			 struct path *path)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	int ret;
 
 	ret = mnt_want_write(path->mnt);
@@ -234,7 +234,7 @@ int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
 {
-	struct io_xattr *ix = io_kiocb_to_cmd(req);
+	struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
 	unsigned int lookup_flags = LOOKUP_FOLLOW;
 	struct path path;
 	int ret;
@@ -1073,31 +1073,6 @@ int audit_alloc(struct task_struct *tsk)
 	return 0;
 }
 
-/**
- * audit_alloc_kernel - allocate an audit_context for a kernel task
- * @tsk: the kernel task
- *
- * Similar to the audit_alloc() function, but intended for kernel private
- * threads. Returns zero on success, negative values on failure.
- */
-int audit_alloc_kernel(struct task_struct *tsk)
-{
-	/*
-	 * At the moment we are just going to call into audit_alloc() to
-	 * simplify the code, but there two things to keep in mind with this
-	 * approach:
-	 *
-	 * 1. Filtering internal kernel tasks is a bit laughable in almost all
-	 *    cases, but there is at least one case where there is a benefit:
-	 *    the '-a task,never' case allows the admin to effectively disable
-	 *    task auditing at runtime.
-	 *
-	 * 2. The {set,clear}_task_syscall_work() ops likely have zero effect
-	 *    on these internal kernel tasks, but they probably don't hurt either.
-	 */
-	return audit_alloc(tsk);
-}
-
 static inline void audit_free_context(struct audit_context *context)
 {
 	/* resetting is extra work, but it is likely just noise */