// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};

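/* a classic (legacy) provided buffer, linked on its group's ->buf_list */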
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

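/* buffer selection/teardown, opcode handlers and pbuf ring registration */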
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

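/*
 * True if this request asked for buffer selection (REQ_F_BUFFER_SELECT)
 * but no buffer has been picked for it yet.
 */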
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

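/* return a selected buffer to the pool, if it is still safe to reuse it */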
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However if that buffer is recycled the original request
	 * data stored in addr is lost. Therefore forbid recycling for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

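/*
 * Release the buffer attached to @req: for a ring provided buffer just
 * advance the ring head, for a classic buffer put it back on @list.
 * Returns the IORING_CQE_F_BUFFER cflags identifying the consumed buffer.
 */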
static inline unsigned int __io_put_kbuf(struct io_kiocb *req,
					 struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list)
			req->buf_list->head++;
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
}

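/* like io_put_kbuf(), but the caller already holds ->completion_lock */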
static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, &req->ctx->io_buffers_comp);
}

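/* drop the buffer attached to @req and return the cflags for its CQE */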
static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int cflags;

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		/* no buffers to recycle for this case */
		cflags = __io_put_kbuf(req, NULL);
	} else if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		cflags = __io_put_kbuf(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		cflags = __io_put_kbuf(req, &req->ctx->io_buffers_cache);
	}

	return cflags;
}
#endif