[PATCH] Numerous fixes to kernel-doc info in source files.
A variety of (mostly) innocuous fixes to the embedded kernel-doc content in
source files, including:

  * make multi-line initial descriptions single line
  * denote some function names, constants and structs as such
  * change erroneous opening '/*' to '/**' in a few places
  * reword some text for clarity

Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 72fd4a35a8 (parent 262086cf5b)
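The notation being enforced throughout this patch is standard kernel-doc
markup: parameters are written as @name, constants as %CONST, structures as
&struct foo, and function names carry trailing parentheses so the
documentation tools can cross-reference them. A minimal, purely hypothetical
block (not taken from any of the files below) showing those conventions:

	/**
	 * example_push - add one element to a queue
	 * @q: pointer to the &struct example_queue to push onto
	 * @val: the value to append
	 *
	 * Returns %0 on success or %-ENOMEM if the queue could not grow.
	 * See also example_pop().
	 */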
@@ -211,12 +211,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as it was not @u.
+ * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
 #define atomic_add_unless(v, a, u) \
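A minimal usage sketch of the interface documented above (not part of the
patch; the object and field names are hypothetical):

	/* take a reference only if the count has not already dropped to zero */
	if (!atomic_add_unless(&obj->refcnt, 1, 0))
		return NULL;	/* object is already being torn down */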
@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
  *
  * This is defined the same way as
  * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from the above ffz() (man ffs).
  */
 static inline int ffs(int x)
 {
@@ -388,7 +388,7 @@ static inline int ffs(int x)
  * fls - find last bit set
  * @x: the word to search
  *
- * This is defined the same way as ffs.
+ * This is defined the same way as ffs().
  */
 static inline int fls(int x)
 {
@@ -172,7 +172,7 @@ void __init parse_early_param(void);
  * module_init() - driver initialization entry point
  * @x: function to be run at kernel boot time or module insertion
  *
- * module_init() will either be called during do_initcalls (if
+ * module_init() will either be called during do_initcalls() (if
  * builtin) or at module insertion time (if a module). There can only
  * be one per module.
  */
@@ -74,7 +74,7 @@ static inline void kfifo_reset(struct kfifo *fifo)
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  */
@@ -99,8 +99,8 @@ static inline unsigned int kfifo_put(struct kfifo *fifo,
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  */
 static inline unsigned int kfifo_get(struct kfifo *fifo,
 			unsigned char *buffer, unsigned int len)
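A usage sketch of the kfifo_put()/kfifo_get() pair documented above (not part
of the patch). It assumes the 2.6.20-era interface shown in these hunks, a
power-of-two buffer size, and hypothetical variable names:

	static DEFINE_SPINLOCK(my_fifo_lock);
	unsigned char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	struct kfifo *fifo = kfifo_init(buf, PAGE_SIZE, GFP_KERNEL, &my_fifo_lock);
	unsigned char out[64];

	unsigned int copied = kfifo_put(fifo, data, len);	/* <= len bytes, free space permitting */
	unsigned int read = kfifo_get(fifo, out, sizeof(out));	/* <= sizeof(out) bytes */

	kfree(fifo);	/* a kfifo_init()'d fifo is freed with kfree(), not kfifo_free() */
	kfree(buf);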
@@ -163,7 +163,7 @@ static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
  * @add1: addend1
  * @add2: addend2
  *
- * Returns the sum of addend1 and addend2
+ * Returns the sum of @add1 and @add2.
  */
 static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
 {
@@ -189,7 +189,7 @@ static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
  * @kt: addend
  * @nsec: the scalar nsec value to add
  *
- * Returns the sum of kt and nsec in ktime_t format
+ * Returns the sum of @kt and @nsec in ktime_t format
  */
 extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
 
@@ -246,7 +246,7 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
  * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
  * @kt: the ktime_t variable to convert
  *
- * Returns the scalar nanoseconds representation of kt
+ * Returns the scalar nanoseconds representation of @kt
  */
 static inline s64 ktime_to_ns(const ktime_t kt)
 {
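A short sketch of the ktime helpers documented above (not from the patch):

	ktime_t t1 = ktime_get();				/* current monotonic time */
	ktime_t t2 = ktime_add_ns(t1, 500 * NSEC_PER_USEC);	/* t1 + 500 microseconds */
	s64 delta = ktime_to_ns(ktime_sub(t2, t1));		/* back to scalar nanoseconds */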
@@ -161,7 +161,7 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
- * Note: list_empty on entry does not return true after this, the entry is
+ * Note: list_empty() on entry does not return true after this, the entry is
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
@@ -179,7 +179,7 @@ extern void list_del(struct list_head *entry);
  * list_del_rcu - deletes entry from list without re-initialization
  * @entry: the element to delete from the list.
  *
- * Note: list_empty on entry does not return true after this,
+ * Note: list_empty() on entry does not return true after this,
  * the entry is in an undefined state. It is useful for RCU based
  * lockfree traversal.
  *
@@ -209,7 +209,8 @@ static inline void list_del_rcu(struct list_head *entry)
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
  * @new : the new element to insert
- * Note: if 'old' was empty, it will be overwritten.
+ *
+ * If @old was empty, it will be overwritten.
  */
 static inline void list_replace(struct list_head *old,
 			struct list_head *new)
@@ -488,12 +489,12 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	pos = list_entry(pos->member.prev, typeof(*pos), member))
 
 /**
- * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  * @pos: the type * to use as a start point
  * @head: the head of the list
  * @member: the name of the list_struct within the struct.
  *
- * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  */
 #define list_prepare_entry(pos, head, member) \
 	((pos) ? : list_entry(head, typeof(*pos), member))
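A tiny sketch of the list_del()/list_replace() semantics spelled out above
(not from the patch; the nodes are hypothetical):

	LIST_HEAD(q);
	struct list_head a, b;

	list_add(&a, &q);
	list_replace(&a, &b);	/* b takes a's place; a's links are not reinitialised */
	list_del(&b);		/* afterwards list_empty(&b) is not true: b is undefined */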
ipc/util.c (21 changed lines)
@@ -150,7 +150,7 @@ void free_ipc_ns(struct kref *kref)
  * ipc_init - initialise IPC subsystem
  *
  * The various system5 IPC resources (semaphores, messages and shared
- * memory are initialised
+ * memory) are initialised
  */
 
 static int __init ipc_init(void)
@@ -207,8 +207,7 @@ void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
 #ifdef CONFIG_PROC_FS
 static struct file_operations sysvipc_proc_fops;
 /**
- * ipc_init_proc_interface - Create a proc interface for sysipc types
- * using a seq_file interface.
+ * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
  * @path: Path in procfs
  * @header: Banner to be printed at the beginning of the file.
  * @ids: ipc id table to iterate.
@@ -417,7 +416,7 @@ void* ipc_alloc(int size)
  * @ptr: pointer returned by ipc_alloc
  * @size: size of block
  *
- * Free a block created with ipc_alloc. The caller must know the size
+ * Free a block created with ipc_alloc(). The caller must know the size
  * used in the allocation call.
  */
 
@@ -524,7 +523,7 @@ static void ipc_do_vfree(struct work_struct *work)
  * @head: RCU callback structure for queued work
  *
  * Since RCU callback function is called in bh,
- * we need to defer the vfree to schedule_work
+ * we need to defer the vfree to schedule_work().
  */
 static void ipc_schedule_free(struct rcu_head *head)
 {
@@ -541,7 +540,7 @@ static void ipc_schedule_free(struct rcu_head *head)
  * ipc_immediate_free - free ipc + rcu space
  * @head: RCU callback structure that contains pointer to be freed
  *
- * Free from the RCU callback context
+ * Free from the RCU callback context.
  */
 static void ipc_immediate_free(struct rcu_head *head)
 {
@@ -603,8 +602,8 @@ int ipcperms (struct kern_ipc_perm *ipcp, short flag)
  * @in: kernel permissions
  * @out: new style IPC permissions
  *
- * Turn the kernel object 'in' into a set of permissions descriptions
- * for returning to userspace (out).
+ * Turn the kernel object @in into a set of permissions descriptions
+ * for returning to userspace (@out).
  */
 
 
@@ -624,8 +623,8 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
  * @in: new style IPC permissions
  * @out: old style IPC permissions
  *
- * Turn the new style permissions object in into a compatibility
- * object and store it into the 'out' pointer.
+ * Turn the new style permissions object @in into a compatibility
+ * object and store it into the @out pointer.
  */
 
 void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
@@ -722,7 +721,7 @@ int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
  * @cmd: pointer to command
  *
  * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
- * The cmd value is turned from an encoding command and version into
+ * The @cmd value is turned from an encoding command and version into
  * just the command code.
  */
 
@@ -257,8 +257,7 @@ static int has_stopped_jobs(int pgrp)
 }
 
 /**
- * reparent_to_init - Reparent the calling kernel thread to the init task
- * of the pid space that the thread belongs to.
+ * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
  *
  * If a kernel thread is launched as a result of a system call, or if
  * it ever exits, it should generally reparent itself to init so that
@@ -102,7 +102,7 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
  *
  * The function calculates the monotonic clock from the realtime
  * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by ts.
+ * in normalized timespec format in the variable pointed to by @ts.
  */
 void ktime_get_ts(struct timespec *ts)
 {
@@ -583,8 +583,8 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
  * @which_clock: which clock to query
  * @tp: pointer to timespec variable to store the resolution
  *
- * Store the resolution of the clock selected by which_clock in the
- * variable pointed to by tp.
+ * Store the resolution of the clock selected by @which_clock in the
+ * variable pointed to by @tp.
  */
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
@@ -32,8 +32,8 @@
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
  * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
- * struct kfifo with kfree().
+ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
+ * &struct kfifo with kfree().
  */
 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
 			gfp_t gfp_mask, spinlock_t *lock)
@@ -108,7 +108,7 @@ EXPORT_SYMBOL(kfifo_free);
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  *
@@ -155,8 +155,8 @@ EXPORT_SYMBOL(__kfifo_put);
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
@@ -50,7 +50,7 @@ static struct kthread_stop_info kthread_stop_info;
 /**
  * kthread_should_stop - should this kthread return now?
  *
- * When someone calls kthread_stop on your kthread, it will be woken
+ * When someone calls kthread_stop() on your kthread, it will be woken
  * and this will return true. You should then return, and your return
  * value will be passed through to kthread_stop().
  */
@@ -143,7 +143,7 @@ static void keventd_create_kthread(struct work_struct *work)
  * it. See also kthread_run(), kthread_create_on_cpu().
  *
  * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn can either call do_exit() directly if it is a
+ * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which noone will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called). The return value should be zero
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(kthread_create);
  *
  * Description: This function is equivalent to set_cpus_allowed(),
  * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create().
+ * stopped (i.e., just returned from kthread_create()).
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
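The kthread_should_stop()/kthread_stop() contract described above, as a short
sketch (not from the patch; names are hypothetical):

	static int my_thread_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);	/* do periodic work here */
		return 0;	/* handed back as kthread_stop()'s return value */
	}

	/* creator side */
	struct task_struct *tsk = kthread_run(my_thread_fn, NULL, "my_thread");
	/* ... later ... */
	int ret = kthread_stop(tsk);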
@@ -483,7 +483,7 @@ static int have_callable_console(void)
  * printk - print a kernel message
  * @fmt: format string
  *
- * This is printk. It can be called from any context. We want it to work.
+ * This is printk(). It can be called from any context. We want it to work.
  *
  * We try to grab the console_sem. If we succeed, it's easy - we log the output and
  * call the console drivers. If we fail to get the semaphore we place the output
@@ -328,7 +328,7 @@ static void wakeup_readers(struct work_struct *work)
  * @buf: the channel buffer
  * @init: 1 if this is a first-time initialization
  *
- * See relay_reset for description of effect.
+ * See relay_reset() for description of effect.
  */
 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 {
@@ -364,7 +364,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  * and restarting the channel in its initial state. The buffers
  * are not freed, so any mappings are still in effect.
  *
- * NOTE: Care should be taken that the channel isn't actually
+ * NOTE. Care should be taken that the channel isn't actually
  * being used by anything when this call is made.
  */
 void relay_reset(struct rchan *chan)
@@ -528,7 +528,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
  * Creates a channel buffer for each cpu using the sizes and
  * attributes specified. The created channel buffer files
  * will be named base_filename0...base_filenameN-1. File
- * permissions will be S_IRUSR.
+ * permissions will be %S_IRUSR.
  */
 struct rchan *relay_open(const char *base_filename,
 			struct dentry *parent,
@@ -648,7 +648,7 @@ EXPORT_SYMBOL_GPL(relay_switch_subbuf);
  * subbufs_consumed should be the number of sub-buffers newly consumed,
  * not the total consumed.
  *
- * NOTE: Kernel clients don't need to call this function if the channel
+ * NOTE. Kernel clients don't need to call this function if the channel
  * mode is 'overwrite'.
  */
 void relay_subbufs_consumed(struct rchan *chan,
@@ -749,7 +749,7 @@ static int relay_file_open(struct inode *inode, struct file *filp)
  * @filp: the file
  * @vma: the vma describing what to map
  *
- * Calls upon relay_mmap_buf to map the file into user space.
+ * Calls upon relay_mmap_buf() to map the file into user space.
  */
 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -891,7 +891,7 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
  * @read_pos: file read position
  * @buf: relay channel buffer
  *
- * If the read_pos is in the middle of padding, return the
+ * If the @read_pos is in the middle of padding, return the
  * position of the first actually available byte, otherwise
  * return the original value.
  */
@@ -4203,13 +4203,12 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 }
 
 /**
- * sched_setscheduler - change the scheduling policy and/or RT priority of
- * a thread.
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
- * NOTE: the task may be already dead
+ * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
 			struct sched_param *param)
@@ -4577,7 +4576,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
 /**
  * sys_sched_yield - yield the current processor to other threads.
  *
- * this function yields the current CPU by moving the calling thread
+ * This function yields the current CPU by moving the calling thread
  * to the expired array. If there are no other threads running on this
  * CPU then this function will return.
  */
@@ -4704,7 +4703,7 @@ EXPORT_SYMBOL(cond_resched_softirq);
 /**
  * yield - yield the current processor to other threads.
  *
- * this is a shortcut for kernel-space yielding - it marks the
+ * This is a shortcut for kernel-space yielding - it marks the
  * thread runnable and calls sys_sched_yield().
  */
 void __sched yield(void)
@@ -2282,7 +2282,7 @@ static int do_tkill(int tgid, int pid, int sig)
  * @pid: the PID of the thread
  * @sig: signal to be sent
  *
- * This syscall also checks the tgid and returns -ESRCH even if the PID
+ * This syscall also checks the @tgid and returns -ESRCH even if the PID
  * exists but it's not belonging to the target process anymore. This
  * method solves the problem of threads exiting and PIDs getting reused.
  */
kernel/sys.c (10 changed lines)
@@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
  * This routine uses RCU to synchronize with changes to the chain.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -313,7 +313,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
  * All locking must be provided by the caller.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
  * run in a process context, so they are allowed to block.
  *
  * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
  * will return immediately, with the return value of
  * the notifier function which halted execution.
  * Otherwise the return value is the return value
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
  * Registers a function with the list of functions
  * to be called at reboot time.
  *
- * Currently always returns zero, as blocking_notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register()
  * always returns zero.
  */
 
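A sketch of the notifier-chain pattern these comments describe (not from the
patch; the chain, event code and callback are hypothetical):

	static BLOCKING_NOTIFIER_HEAD(my_chain);

	static int my_callback(struct notifier_block *nb, unsigned long action, void *data)
	{
		return NOTIFY_OK;	/* or a value with NOTIFY_STOP_MASK set, to halt the chain */
	}

	static struct notifier_block my_nb = { .notifier_call = my_callback };

	blocking_notifier_chain_register(&my_chain, &my_nb);
	blocking_notifier_call_chain(&my_chain, MY_EVENT, NULL);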
@@ -85,7 +85,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * @j: the time in (absolute) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies rounds an absolute time in the future (in jiffies)
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -98,7 +98,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies(unsigned long j, int cpu)
 {
@@ -142,7 +142,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * @j: the time in (relative) jiffies that should be rounded
  * @cpu: the processor number on which the timeout will happen
  *
- * __round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  * processors firing at the exact same time, which could lead
  * to lock contention or spurious cache line bouncing.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 {
@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * round_jiffies - function to round jiffies to a full second
  * @j: the time in (absolute) jiffies that should be rounded
  *
- * round_jiffies rounds an absolute time in the future (in jiffies)
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies(unsigned long j)
 {
@@ -194,7 +194,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * round_jiffies_relative - function to round jiffies to a full second
  * @j: the time in (relative) jiffies that should be rounded
  *
- * round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * round_jiffies_relative() rounds a time delta in the future (in jiffies)
  * up or down to (approximately) full seconds. This is useful for timers
  * for which the exact time they fire does not matter too much, as long as
  * they fire approximately every X seconds.
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
  * at the same time, rather than at various times spread out. The goal
  * of this is to have the CPU wake up less, which saves power.
  *
- * The return value is the rounded version of the "j" parameter.
+ * The return value is the rounded version of the @j parameter.
  */
 unsigned long round_jiffies_relative(unsigned long j)
 {
@@ -387,7 +387,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
  * @timer: the timer to be modified
  * @expires: new timeout in jiffies
  *
- * mod_timer is a more efficient way to update the expire field of an
+ * mod_timer() is a more efficient way to update the expire field of an
  * active timer (if the timer is inactive it will be activated)
  *
  * mod_timer(timer, expires) is equivalent to:
@@ -490,7 +490,7 @@ out:
  * the timer it also makes sure the handler has finished executing on other
  * CPUs.
  *
- * Synchronization rules: callers must prevent restarting of the timer,
+ * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
  * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
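The batching intent described above, in one line (sketch, not from the patch;
my_timer is a hypothetical struct timer_list):

	/* rearm for "roughly a second from now", aligned to a full second */
	mod_timer(&my_timer, round_jiffies(jiffies + HZ));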
@@ -656,8 +656,7 @@ void flush_scheduled_work(void)
 EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
  * @dwork: the delayed work struct
  */
@@ -670,8 +669,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd
- * work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
@@ -95,7 +95,7 @@ void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
 }
 EXPORT_SYMBOL(__bitmap_complement);
 
-/*
+/**
  * __bitmap_shift_right - logical right shift of the bits in a bitmap
  * @dst - destination bitmap
  * @src - source bitmap
@@ -139,7 +139,7 @@ void __bitmap_shift_right(unsigned long *dst,
 EXPORT_SYMBOL(__bitmap_shift_right);
 
 
-/*
+/**
  * __bitmap_shift_left - logical left shift of the bits in a bitmap
  * @dst - destination bitmap
  * @src - source bitmap
@@ -529,7 +529,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 }
 EXPORT_SYMBOL(bitmap_parselist);
 
-/*
+/**
  * bitmap_pos_to_ord(buf, pos, bits)
  * @buf: pointer to a bitmap
  * @pos: a bit position in @buf (0 <= @pos < @bits)
@@ -804,7 +804,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
  * @pos: beginning of bit region to release
  * @order: region size (log base 2 of number of bits) to release
  *
- * This is the complement to __bitmap_find_free_region and releases
+ * This is the complement to __bitmap_find_free_region() and releases
  * the found region (by clearing it in the bitmap).
  *
  * No return value.
@@ -43,10 +43,10 @@ static int get_range(char **str, int *pint)
  * comma as well.
  *
  * Return values:
- * 0 : no int in string
- * 1 : int found, no subsequent comma
- * 2 : int found including a subsequent comma
- * 3 : hyphen found to denote a range
+ * 0 - no int in string
+ * 1 - int found, no subsequent comma
+ * 2 - int found including a subsequent comma
+ * 3 - hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
@@ -329,8 +329,8 @@ static void sub_remove(struct idr *idp, int shift, int id)
 
 /**
  * idr_remove - remove the given id and free it's slot
- * idp: idr handle
- * id: uniqueue key
+ * @idp: idr handle
+ * @id: unique key
  */
 void idr_remove(struct idr *idp, int id)
 {
@@ -97,11 +97,12 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 }
 
 /**
- * kobject_get_path - generate and return the path associated with a given kobj
- * and kset pair. The result must be freed by the caller with kfree().
+ * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
  *
  * @kobj: kobject in question, with which to build the path
  * @gfp_mask: the allocation type used to allocate the path
+ *
+ * The result must be freed by the caller with kfree().
  */
 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
@@ -20,8 +20,8 @@
 #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
 #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
 
-/*
- * sha_transform: single block SHA1 transform
+/**
+ * sha_transform - single block SHA1 transform
  *
  * @digest: 160 bit digest to update
  * @data: 512 bits of data to hash
@@ -80,9 +80,8 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
 }
 EXPORT_SYMBOL(sha_transform);
 
-/*
- * sha_init: initialize the vectors for a SHA1 digest
- *
+/**
+ * sha_init - initialize the vectors for a SHA1 digest
  * @buf: vector to initialize
  */
 void sha_init(__u32 *buf)
@@ -27,7 +27,7 @@ static void generic_swap(void *a, void *b, int size)
 	} while (--size > 0);
 }
 
-/*
+/**
  * sort - sort an array of elements
  * @base: pointer to data to sort
  * @num: number of elements
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(strcat);
  * @src: The string to append to it
  * @count: The maximum numbers of bytes to copy
  *
- * Note that in contrast to strncpy, strncat ensures the result is
+ * Note that in contrast to strncpy(), strncat() ensures the result is
  * terminated.
  */
 char *strncat(char *dest, const char *src, size_t count)
@@ -366,8 +366,7 @@ EXPORT_SYMBOL(strnlen);
 
 #ifndef __HAVE_ARCH_STRSPN
 /**
- * strspn - Calculate the length of the initial substring of @s which only
- * contain letters in @accept
+ * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept
  * @s: The string to be searched
  * @accept: The string to search for
  */
@@ -394,8 +393,7 @@ EXPORT_SYMBOL(strspn);
 
 #ifndef __HAVE_ARCH_STRCSPN
 /**
- * strcspn - Calculate the length of the initial substring of @s which does
- * not contain letters in @reject
+ * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
  * @s: The string to be searched
  * @reject: The string to avoid
  */
@@ -218,7 +218,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
  * Call textsearch_next() to retrieve subsequent matches.
  *
  * Returns the position of first occurrence of the pattern or
- * UINT_MAX if no occurrence was found.
+ * %UINT_MAX if no occurrence was found.
  */
 unsigned int textsearch_find_continuous(struct ts_config *conf,
 			struct ts_state *state,
@@ -247,12 +247,12 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
  * be generated for the given input, excluding the trailing
  * '\0', as per ISO C99. If you want to have the exact
  * number of characters written into @buf as return value
- * (not including the trailing '\0'), use vscnprintf. If the
+ * (not including the trailing '\0'), use vscnprintf(). If the
  * return is greater than or equal to @size, the resulting
  * string is truncated.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want snprintf instead.
+ * You probably want snprintf() instead.
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
@@ -509,7 +509,7 @@ EXPORT_SYMBOL(vsnprintf);
  * returns 0.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want scnprintf instead.
+ * You probably want scnprintf() instead.
  */
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
@@ -577,11 +577,11 @@ EXPORT_SYMBOL(scnprintf);
  * @args: Arguments for the format string
  *
  * The function returns the number of characters written
- * into @buf. Use vsnprintf or vscnprintf in order to avoid
+ * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
  * buffer overflows.
  *
  * Call this function if you are already dealing with a va_list.
- * You probably want sprintf instead.
+ * You probably want sprintf() instead.
  */
 int vsprintf(char *buf, const char *fmt, va_list args)
 {
@@ -597,7 +597,7 @@ EXPORT_SYMBOL(vsprintf);
  * @...: Arguments for the format string
  *
  * The function returns the number of characters written
- * into @buf. Use snprintf or scnprintf in order to avoid
+ * into @buf. Use snprintf() or scnprintf() in order to avoid
  * buffer overflows.
  */
 int sprintf(char * buf, const char *fmt, ...)
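The snprintf()/scnprintf() distinction these comments keep pointing at, in two
lines (sketch, not from the patch; id and name are hypothetical):

	char buf[16];
	int n = scnprintf(buf, sizeof(buf), "%d:%s", id, name);
	/* n is the number of characters actually stored in buf (excluding '\0'),
	 * whereas snprintf() returns how many would have been generated. */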
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(sync_page_range);
  * @pos: beginning offset in pages to write
  * @count: number of bytes to write
  *
- * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
+ * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
@@ -784,7 +784,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  * @mapping: target address_space
  * @index: the page index
  *
- * Same as grab_cache_page, but do not wait if the page is unavailable.
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
  * This is intended for speculative data generators, where the data can
  * be regenerated if the page couldn't be grabbed. This routine should
  * be safe to call while holding the lock for another page.
@@ -1775,9 +1775,7 @@ restart:
 }
 
 /**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file. This will be rounded down to a PAGE_SIZE
@@ -46,9 +46,9 @@ static void free_pool(mempool_t *pool)
  * @pool_data: optional private data available to the user-defined functions.
  *
  * this function creates and allocates a guaranteed size, preallocated
- * memory pool. The pool can be used from the mempool_alloc and mempool_free
+ * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
- * functions might sleep - as long as the mempool_alloc function is not called
+ * functions might sleep - as long as the mempool_alloc() function is not called
  * from IRQ contexts.
  */
 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(mempool_destroy);
  * mempool_create().
  * @gfp_mask: the usual allocation bitmask.
  *
- * this function only sleeps if the alloc_fn function sleeps or
+ * this function only sleeps if the alloc_fn() function sleeps or
  * returns NULL. Note that due to preallocation, this function
  * *never* fails when called from process contexts. (it might
  * fail if called from an IRQ context.)
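The mempool_create()/mempool_alloc() pairing documented above, sketched with
the stock slab helpers (not from the patch; my_cache is assumed to be an
existing kmem_cache):

	mempool_t *pool = mempool_create(16, mempool_alloc_slab,
					 mempool_free_slab, my_cache);
	void *obj = mempool_alloc(pool, GFP_KERNEL);	/* won't fail in process context */
	mempool_free(obj, pool);
	mempool_destroy(pool);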
@@ -549,9 +549,7 @@ void __init page_writeback_init(void)
 }
 
 /**
- * generic_writepages - walk the list of dirty pages of the given
- * address space and writepage() all of them.
- *
+ * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  *
@@ -698,7 +696,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 
 /**
  * write_one_page - write out a single page and optionally wait on I/O
- *
  * @page: the page to write
  * @wait: if true, wait on writeout
  *
@@ -2520,7 +2520,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  * kmem_cache_destroy - delete a cache
  * @cachep: the cache to destroy
  *
- * Remove a struct kmem_cache object from the slab cache.
+ * Remove a &struct kmem_cache object from the slab cache.
  *
  * It is expected this function will be called by a module when it is
  * unloaded. This will remove the cache completely, and avoid a duplicate
@@ -699,7 +699,7 @@ finished:
  * that it is big enough to cover the vma. Will return failure if
  * that criteria isn't met.
  *
- * Similar to remap_pfn_range (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			unsigned long pgoff)