#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H

#define PIPEFS_MAGIC 0x50495045

#define PIPE_BUFFERS (16)

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */

/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above.
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};

/**
 * struct pipe_inode_info - a linux kernel pipe
 * @wait: reader/writer wait point in case of empty/full pipe
 * @nrbufs: the number of non-empty pipe buffers in this pipe
 * @curbuf: the current pipe buffer entry
 * @tmp_page: cached released page
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @waiting_writers: number of writers blocked waiting for room
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @inode: inode this pipe is attached to
 * @bufs: the circular array of pipe buffers
 **/
struct pipe_inode_info {
	wait_queue_head_t wait;
	unsigned int nrbufs, curbuf;
	struct page *tmp_page;
	unsigned int readers;
	unsigned int writers;
	unsigned int waiting_writers;
	unsigned int r_counter;
	unsigned int w_counter;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct inode *inode;
	struct pipe_buffer bufs[PIPE_BUFFERS];
};
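
/*
 * Illustrative sketch (not part of this header): @bufs is treated as a
 * circular array indexed via @curbuf and @nrbufs.  Assuming PIPE_BUFFERS
 * stays a power of two (it is 16 above), a reader can walk the occupied
 * slots with simple masking arithmetic, for example:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < pipe->nrbufs; i++) {
 *		unsigned int slot = (pipe->curbuf + i) & (PIPE_BUFFERS - 1);
 *		struct pipe_buffer *buf = pipe->bufs + slot;
 *		do_something(buf->page, buf->offset, buf->len);
 *	}
 *
 * The loop is only a sketch: it elides locking (pipe_lock()) and buffer
 * consumption, and do_something() is a placeholder.
 */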

/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->steal()
 *	...
 *	->map()
 *	...
 *	->unmap()
 *
 * That is, ->map() must be called on a confirmed buffer, and the
 * same goes for ->steal(). See below for the meaning of each
 * operation. Also see kerneldoc in fs/pipe.c for the pipe
 * and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * This is set to 1 if the generic pipe read/write may coalesce
	 * data into an existing buffer. If this is set to 0, a new pipe
	 * page segment is always used for new data.
	 */
	int can_merge;

	/*
	 * ->map() returns a virtual address mapping of the pipe buffer.
	 * The last integer flag reflects whether this should be an atomic
	 * mapping or not. The atomic map is faster, but you can't take
	 * page faults before calling ->unmap() again. So if you need to
	 * e.g. access user data through copy_to/from_user(), you must get
	 * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
	 * atomic maps, so you can't map more than one pipe_buffer at once
	 * and you have to be careful if mapping another page as source
	 * or destination for a copy (IOW, it has to use something else
	 * than KM_USER0).
	 */
	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);

	/*
	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
	 */
	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);

	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer have been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->steal() returns 0 for success, in which case the contents
	 * of the pipe (the buf->page) is locked and now completely owned
	 * by the caller. The page may then be transferred to a different
	 * mapping, the most common case being insertion into a different
	 * file's address space cache.
	 */
	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer.
	 */
	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
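
/*
 * Illustrative sketch (not part of this header): a consumer of a pipe
 * buffer is expected to follow the nesting described above - confirm the
 * buffer, then map it, copy out of it, and unmap it again.  The helper
 * below is hypothetical and only shows the calling pattern; it uses an
 * atomic map (final argument 1), so nothing between ->map() and
 * ->unmap() may sleep or fault:
 *
 *	static int example_peek(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf, char *dst)
 *	{
 *		int error = buf->ops->confirm(pipe, buf);
 *		void *src;
 *
 *		if (error)
 *			return error;
 *
 *		src = buf->ops->map(pipe, buf, 1);
 *		memcpy(dst, (char *)src + buf->offset, buf->len);
 *		buf->ops->unmap(pipe, buf, src);
 *		return 0;
 *	}
 */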

/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
   memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE		PAGE_SIZE

/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
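
/*
 * Illustrative sketch (not part of this header): pipe_lock()/pipe_unlock()
 * protect a single pipe, while pipe_double_lock() acquires two pipes in a
 * consistent order so that tasks moving data between the same pair of
 * pipes cannot deadlock against each other.  A hypothetical transfer
 * (placeholder names ipipe/opipe) might be bracketed like this:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... move buffers from ipipe to opipe ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */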

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
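
/*
 * Illustrative sketch (not part of this header): pipe_wait() is called
 * with the pipe locked; it drops the lock, sleeps until woken by the
 * other end, and re-acquires the lock before returning.  A writer that
 * finds the pipe full typically loops along these lines (signal and
 * error handling omitted):
 *
 *	while (pipe->nrbufs == PIPE_BUFFERS) {
 *		pipe->waiting_writers++;
 *		pipe_wait(pipe);
 *		pipe->waiting_writers--;
 *	}
 */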

struct pipe_inode_info *alloc_pipe_info(struct inode *inode);
void free_pipe_info(struct inode *inode);
void __free_pipe_info(struct pipe_inode_info *);

/* Generic pipe buffer ops functions */
void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
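
/*
 * Illustrative sketch (not part of this header): a pipe_buf_operations
 * instance that relies entirely on the generic helpers above could be
 * wired up as follows (the name is hypothetical; see fs/pipe.c for the
 * in-tree anonymous pipe ops):
 *
 *	static const struct pipe_buf_operations example_pipe_buf_ops = {
 *		.can_merge	= 1,
 *		.map		= generic_pipe_buf_map,
 *		.unmap		= generic_pipe_buf_unmap,
 *		.confirm	= generic_pipe_buf_confirm,
 *		.release	= generic_pipe_buf_release,
 *		.steal		= generic_pipe_buf_steal,
 *		.get		= generic_pipe_buf_get,
 *	};
 */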
#endif