fs: eventpoll: fix comments & kernel-doc notation
Use the documented kernel-doc format for function Return: descriptions.
Begin constant values in kernel-doc comments with '%'.
Remove kernel-doc "/**" from 2 functions that are not documented with
kernel-doc notation.
Fix typos, punctuation, & grammar.

Also fix a few kernel-doc warnings:

../fs/eventpoll.c:1883: warning: Function parameter or member 'ep' not described in 'ep_loop_check_proc'
../fs/eventpoll.c:1883: warning: Excess function parameter 'priv' description in 'ep_loop_check_proc'
../fs/eventpoll.c:1932: warning: Function parameter or member 'ep' not described in 'ep_loop_check'
../fs/eventpoll.c:1932: warning: Excess function parameter 'from' description in 'ep_loop_check'

Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
This commit is contained in:
parent
26bea42771
commit
a6c67fee9c
|
@ -366,8 +366,8 @@ static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
|
||||||
*
|
*
|
||||||
* @ep: Pointer to the eventpoll context.
|
* @ep: Pointer to the eventpoll context.
|
||||||
*
|
*
|
||||||
* Returns: Returns a value different than zero if ready events are available,
|
* Return: a value different than %zero if ready events are available,
|
||||||
* or zero otherwise.
|
* or %zero otherwise.
|
||||||
*/
|
*/
|
||||||
static inline int ep_events_available(struct eventpoll *ep)
|
static inline int ep_events_available(struct eventpoll *ep)
|
||||||
{
|
{
|
||||||
|
@ -1023,7 +1023,7 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_KCMP */
|
#endif /* CONFIG_KCMP */
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* Adds a new entry to the tail of the list in a lockless way, i.e.
|
* Adds a new entry to the tail of the list in a lockless way, i.e.
|
||||||
* multiple CPUs are allowed to call this function concurrently.
|
* multiple CPUs are allowed to call this function concurrently.
|
||||||
*
|
*
|
||||||
|
@ -1035,10 +1035,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
|
||||||
* completed.
|
* completed.
|
||||||
*
|
*
|
||||||
* Also an element can be locklessly added to the list only in one
|
* Also an element can be locklessly added to the list only in one
|
||||||
* direction i.e. either to the tail either to the head, otherwise
|
* direction i.e. either to the tail or to the head, otherwise
|
||||||
* concurrent access will corrupt the list.
|
* concurrent access will corrupt the list.
|
||||||
*
|
*
|
||||||
* Returns %false if element has been already added to the list, %true
|
* Return: %false if element has been already added to the list, %true
|
||||||
* otherwise.
|
* otherwise.
|
||||||
*/
|
*/
|
||||||
static inline bool list_add_tail_lockless(struct list_head *new,
|
static inline bool list_add_tail_lockless(struct list_head *new,
|
||||||
|
@ -1076,11 +1076,11 @@ static inline bool list_add_tail_lockless(struct list_head *new,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
|
* Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
|
||||||
* i.e. multiple CPUs are allowed to call this function concurrently.
|
* i.e. multiple CPUs are allowed to call this function concurrently.
|
||||||
*
|
*
|
||||||
* Returns %false if epi element has been already chained, %true otherwise.
|
* Return: %false if epi element has been already chained, %true otherwise.
|
||||||
*/
|
*/
|
||||||
static inline bool chain_epi_lockless(struct epitem *epi)
|
static inline bool chain_epi_lockless(struct epitem *epi)
|
||||||
{
|
{
|
||||||
|
@ -1105,8 +1105,8 @@ static inline bool chain_epi_lockless(struct epitem *epi)
|
||||||
* mechanism. It is called by the stored file descriptors when they
|
* mechanism. It is called by the stored file descriptors when they
|
||||||
* have events to report.
|
* have events to report.
|
||||||
*
|
*
|
||||||
* This callback takes a read lock in order not to content with concurrent
|
* This callback takes a read lock in order not to contend with concurrent
|
||||||
* events from another file descriptors, thus all modifications to ->rdllist
|
* events from another file descriptor, thus all modifications to ->rdllist
|
||||||
* or ->ovflist are lockless. Read lock is paired with the write lock from
|
* or ->ovflist are lockless. Read lock is paired with the write lock from
|
||||||
* ep_scan_ready_list(), which stops all list modifications and guarantees
|
* ep_scan_ready_list(), which stops all list modifications and guarantees
|
||||||
* that lists state is seen correctly.
|
* that lists state is seen correctly.
|
||||||
|
@ -1335,8 +1335,8 @@ static int reverse_path_check_proc(struct hlist_head *refs, int depth)
|
||||||
* paths such that we will spend all our time waking up
|
* paths such that we will spend all our time waking up
|
||||||
* eventpoll objects.
|
* eventpoll objects.
|
||||||
*
|
*
|
||||||
* Returns: Returns zero if the proposed links don't create too many paths,
|
* Return: %zero if the proposed links don't create too many paths,
|
||||||
* -1 otherwise.
|
* %-1 otherwise.
|
||||||
*/
|
*/
|
||||||
static int reverse_path_check(void)
|
static int reverse_path_check(void)
|
||||||
{
|
{
|
||||||
|
@ -1734,7 +1734,7 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ep_poll - Retrieves ready events, and delivers them to the caller supplied
|
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
|
||||||
* event buffer.
|
* event buffer.
|
||||||
*
|
*
|
||||||
* @ep: Pointer to the eventpoll context.
|
* @ep: Pointer to the eventpoll context.
|
||||||
|
@ -1747,7 +1747,7 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
|
||||||
* until at least one event has been retrieved (or an error
|
* until at least one event has been retrieved (or an error
|
||||||
* occurred).
|
* occurred).
|
||||||
*
|
*
|
||||||
* Returns: Returns the number of ready events which have been fetched, or an
|
* Return: the number of ready events which have been fetched, or an
|
||||||
* error code, in case of error.
|
* error code, in case of error.
|
||||||
*/
|
*/
|
||||||
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
|
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
|
||||||
|
@ -1774,9 +1774,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This call is racy: We may or may not see events that are being added
|
* This call is racy: We may or may not see events that are being added
|
||||||
* to the ready list under the lock (e.g., in IRQ callbacks). For, cases
|
* to the ready list under the lock (e.g., in IRQ callbacks). For cases
|
||||||
* with a non-zero timeout, this thread will check the ready list under
|
* with a non-zero timeout, this thread will check the ready list under
|
||||||
* lock and will added to the wait queue. For, cases with a zero
|
* lock and will add to the wait queue. For cases with a zero
|
||||||
* timeout, the user by definition should not care and will have to
|
* timeout, the user by definition should not care and will have to
|
||||||
* recheck again.
|
* recheck again.
|
||||||
*/
|
*/
|
||||||
|
@ -1869,15 +1869,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ep_loop_check_proc - verify that adding an epoll file inside another
|
* ep_loop_check_proc - verify that adding an epoll file inside another
|
||||||
* epoll structure, does not violate the constraints, in
|
* epoll structure does not violate the constraints, in
|
||||||
* terms of closed loops, or too deep chains (which can
|
* terms of closed loops, or too deep chains (which can
|
||||||
* result in excessive stack usage).
|
* result in excessive stack usage).
|
||||||
*
|
*
|
||||||
* @priv: Pointer to the epoll file to be currently checked.
|
* @ep: the &struct eventpoll to be currently checked.
|
||||||
* @depth: Current depth of the path being checked.
|
* @depth: Current depth of the path being checked.
|
||||||
*
|
*
|
||||||
* Returns: Returns zero if adding the epoll @file inside current epoll
|
* Return: %zero if adding the epoll @file inside current epoll
|
||||||
* structure @ep does not violate the constraints, or -1 otherwise.
|
* structure @ep does not violate the constraints, or %-1 otherwise.
|
||||||
*/
|
*/
|
||||||
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
|
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
|
||||||
{
|
{
|
||||||
|
@ -1919,14 +1919,14 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ep_loop_check - Performs a check to verify that adding an epoll file (@to)
|
* ep_loop_check - Performs a check to verify that adding an epoll file (@to)
|
||||||
* into another epoll file (represented by @from) does not create
|
* into another epoll file (represented by @ep) does not create
|
||||||
* closed loops or too deep chains.
|
* closed loops or too deep chains.
|
||||||
*
|
*
|
||||||
* @from: Pointer to the epoll we are inserting into.
|
* @ep: Pointer to the epoll we are inserting into.
|
||||||
* @to: Pointer to the epoll to be inserted.
|
* @to: Pointer to the epoll to be inserted.
|
||||||
*
|
*
|
||||||
* Returns: Returns zero if adding the epoll @to inside the epoll @from
|
* Return: %zero if adding the epoll @to inside the epoll @from
|
||||||
* does not violate the constraints, or -1 otherwise.
|
* does not violate the constraints, or %-1 otherwise.
|
||||||
*/
|
*/
|
||||||
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
|
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
|
||||||
{
|
{
|
||||||
|
@ -2074,8 +2074,8 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
|
||||||
ep = f.file->private_data;
|
ep = f.file->private_data;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* When we insert an epoll file descriptor, inside another epoll file
|
* When we insert an epoll file descriptor inside another epoll file
|
||||||
* descriptor, there is the change of creating closed loops, which are
|
* descriptor, there is the chance of creating closed loops, which are
|
||||||
* better be handled here, than in more critical paths. While we are
|
* better be handled here, than in more critical paths. While we are
|
||||||
* checking for loops we also determine the list of files reachable
|
* checking for loops we also determine the list of files reachable
|
||||||
* and hang them on the tfile_check_list, so we can check that we
|
* and hang them on the tfile_check_list, so we can check that we
|
||||||
|
@ -2113,7 +2113,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
|
* Try to lookup the file inside our RB tree. Since we grabbed "mtx"
|
||||||
* above, we can be sure to be able to use the item looked up by
|
* above, we can be sure to be able to use the item looked up by
|
||||||
* ep_find() till we release the mutex.
|
* ep_find() till we release the mutex.
|
||||||
*/
|
*/
|
||||||
|
|
Loading…
Reference in New Issue