mm: fix spelling mistakes in header files
Fix some spelling mistakes in comments:

successfull ==> successful
potentialy ==> potentially
alloced ==> allocated
indicies ==> indices
wont ==> won't
resposible ==> responsible
dirtyness ==> dirtiness
droppped ==> dropped
alread ==> already
occured ==> occurred
interupts ==> interrupts
extention ==> extension
slighly ==> slightly
Dont't ==> Don't

Link: https://lkml.kernel.org/r/20210531034849.9549-2-thunder.leizhen@huawei.com
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 06c8839815
parent 76fe17ef58
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -35,12 +35,12 @@ enum compact_result {
 	COMPACT_CONTINUE,
 
 	/*
-	 * The full zone was compacted scanned but wasn't successfull to compact
+	 * The full zone was compacted scanned but wasn't successful to compact
 	 * suitable pages.
 	 */
 	COMPACT_COMPLETE,
 	/*
-	 * direct compaction has scanned part of the zone but wasn't successfull
+	 * direct compaction has scanned part of the zone but wasn't successful
 	 * to compact suitable pages.
 	 */
 	COMPACT_PARTIAL_SKIPPED,
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -113,7 +113,7 @@ int hmm_range_fault(struct hmm_range *range);
  * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
  *
  * When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait for ever, 1000ms ie 1s sounds like a long time to
  * wait already.
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -51,7 +51,7 @@ struct hugepage_subpool {
 	long count;
 	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
 	long used_hpages;	/* Used count against maximum, includes */
-				/* both alloced and reserved pages. */
+				/* both allocated and reserved pages. */
 	struct hstate *hstate;
 	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
 	long rsv_hpages;	/* Pages reserved against global pool to */
@@ -85,7 +85,7 @@ struct resv_map {
  * by a resv_map's lock. The set of regions within the resv_map represent
  * reservations for huge pages, or huge pages that have already been
  * instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
+ * indices into the associated mapping. from indicates the starting index
  * of the region. to represents the first index past the end of the region.
  *
  * For example, a file region structure with from == 0 and to == 4 represents
@@ -797,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
  * It determines whether or not a huge page should be placed on
  * movable zone or not. Movability of any huge page should be
  * required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
  * it is not migratable to start with. Also the size of the huge
  * page should be large enough to be placed under a movable zone
  * and still feasible enough to be migratable. Just the presence
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -146,7 +146,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
  * @lru: the lru pointer.
  * @nid: the node id to scan from.
  * @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
  *  the item currently being scanned
  * @cb_arg: opaque type that will be passed to @isolate
  * @nr_to_walk: how many items to scan.
@@ -172,7 +172,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
  * @lru: the lru pointer.
  * @nid: the node id to scan from.
  * @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
  *  the item currently being scanned
  * @cb_arg: opaque type that will be passed to @isolate
  * @nr_to_walk: how many items to scan.
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -33,7 +33,7 @@ struct mmu_interval_notifier;
  *
  * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
  * access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catch pages that might be written
  * through non CPU mappings.
  *
  * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
@@ -167,7 +167,7 @@ struct mmu_notifier_ops {
 	 * decrease the refcount. If the refcount is decreased on
 	 * invalidate_range_start() then the VM can free pages as page
 	 * table entries are removed. If the refcount is only
-	 * droppped on invalidate_range_end() then the driver itself
+	 * dropped on invalidate_range_end() then the driver itself
 	 * will drop the last refcount but it must take care to flush
 	 * any secondary tlb before doing the final free on the
 	 * page. Pages will no longer be referenced by the linux
@@ -196,7 +196,7 @@ struct mmu_notifier_ops {
 	 * If invalidate_range() is used to manage a non-CPU TLB with
 	 * shared page-tables, it not necessary to implement the
 	 * invalidate_range_start()/end() notifiers, as
-	 * invalidate_range() alread catches the points in time when an
+	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
 	 * discussion on this see Documentation/vm/mmu_notifier.rst
 	 *
@@ -369,7 +369,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
  * mmu_interval_read_retry() will return true.
  *
  * False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
  * provided lock.
  *
  * This call can be used as part of loops and other expensive operations to
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -412,7 +412,7 @@ do {									\
  * instead.
  *
  * If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
  * because the execution thread was rescheduled on another processor or an
  * interrupt occurred and the same percpu variable was modified from the
  * interrupt context.
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,7 +4,7 @@
 
 /*
  * This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
  *
  * The 'gfpmask' refers to the allocation we are currently trying to
  * fulfil.
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -29,7 +29,7 @@ struct notifier_block;		/* in notifier.h */
 #define VM_NO_HUGE_VMAP		0x00000400	/* force PAGE_SIZE pte mapping */
 
 /*
- * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
  *
  * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
  * shadow memory has been mapped. It's used to handle allocation errors so that
@@ -247,7 +247,7 @@ static inline void set_vm_flush_reset_perms(void *addr)
 extern long vread(char *buf, char *addr, unsigned long count);
 
 /*
- *	Internals.  Dont't use..
+ *	Internals.  Don't use..
  */
 extern struct list_head vmap_area_list;
 extern __init void vm_area_add_early(struct vm_struct *vm);