ITER_PIPE: helper for getting pipe buffer by index
The pipe_buffer instances of a pipe are organized as a ring buffer whose size is a power of 2. Indices are *not* reduced modulo the ring size, so the buffer referred to by index N is pipe->bufs[N & (pipe->ring_size - 1)]. The ring size can change over the lifetime of a pipe, but not while the pipe is locked, so for any iov_iter primitive it is a constant.

The original conversion of pipes to this layout went overboard trying to microoptimize that - calculating pipe->ring_size - 1, storing it in a local variable and using it throughout the function. In some cases that might be warranted, but most of the time it only obfuscates what is going on.

Introduce a helper (pipe_buf(pipe, N)) that encapsulates the calculation and use it in the obvious cases. More will follow...

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 2dcedb2a54
parent 0d96493413
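A minimal standalone sketch of the indexing scheme described above, assuming a simplified model: mock_pipe, mock_buf and mock_pipe_buf are invented stand-ins for the real pipe_inode_info/pipe_buffer layout and the pipe_buf() helper, just enough to show why a free-running index masked with (ring_size - 1) always lands on a valid slot of a power-of-2 ring, even across unsigned wrap-around.

/*
 * Standalone sketch, not kernel code: mock types modelling the pipe ring.
 */
#include <assert.h>
#include <stdio.h>

struct mock_buf { int id; };

struct mock_pipe {
	unsigned int ring_size;		/* always a power of 2 */
	struct mock_buf bufs[8];
};

/* same shape as the pipe_buf() helper added by this commit */
static inline struct mock_buf *mock_pipe_buf(struct mock_pipe *pipe,
					     unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

int main(void)
{
	struct mock_pipe pipe = { .ring_size = 8 };
	unsigned int head = 4294967290u;	/* indices are free-running... */

	/*
	 * ...yet masking with ring_size - 1 keeps mapping them onto the
	 * eight real slots, even while the unsigned index wraps around.
	 */
	for (unsigned int i = 0; i < 16; i++) {
		struct mock_buf *b = mock_pipe_buf(&pipe, head + i);
		assert(b >= pipe.bufs && b < pipe.bufs + 8);
		printf("index %u -> slot %td\n", head + i, b - pipe.bufs);
	}
	return 0;
}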
@@ -183,13 +183,18 @@ static int copyin(void *to, const void __user *from, size_t n)
 	return n;
 }
 
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+					   unsigned int slot)
+{
+	return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
 #ifdef PIPE_PARANOIA
 static bool sanity(const struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
 	unsigned int p_head = pipe->head;
 	unsigned int p_tail = pipe->tail;
-	unsigned int p_mask = pipe->ring_size - 1;
 	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
 	unsigned int i_head = i->head;
 	unsigned int idx;
@@ -201,7 +206,7 @@ static bool sanity(const struct iov_iter *i)
 		if (unlikely(i_head != p_head - 1))
 			goto Bad;	// must be at the last buffer...
 
-		p = &pipe->bufs[i_head & p_mask];
+		p = pipe_buf(pipe, i_head);
 		if (unlikely(p->offset + p->len != i->iov_offset))
 			goto Bad;	// ... at the end of segment
 	} else {
@@ -386,11 +391,10 @@ static inline bool allocated(struct pipe_buffer *buf)
 static inline void data_start(const struct iov_iter *i,
 			      unsigned int *iter_headp, size_t *offp)
 {
-	unsigned int p_mask = i->pipe->ring_size - 1;
 	unsigned int iter_head = i->head;
 	size_t off = i->iov_offset;
 
-	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
+	if (off && (!allocated(pipe_buf(i->pipe, iter_head)) ||
 		    off == PAGE_SIZE)) {
 		iter_head++;
 		off = 0;
@@ -1280,10 +1284,9 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 		return iov_iter_alignment_bvec(i);
 
 	if (iov_iter_is_pipe(i)) {
-		unsigned int p_mask = i->pipe->ring_size - 1;
 		size_t size = i->count;
 
-		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
+		if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head)))
 			return size | i->iov_offset;
 		return size;
 	}