mm/vmalloc: add code comment for find_vmap_area_exceed_addr()
Its behaviour is like find_vma() which finds an area above the specified address, add comment to make it easier to understand. And also fix two places of grammar mistake/typo. Link: https://lkml.kernel.org/r/20220607105958.382076-5-bhe@redhat.com Signed-off-by: Baoquan He <bhe@redhat.com> Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com> Cc: Christoph Hellwig <hch@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
baa468a648
commit
153090f2c6
|
@ -790,6 +790,7 @@ unsigned long vmalloc_nr_pages(void)
|
||||||
return atomic_long_read(&nr_vmalloc_pages);
|
return atomic_long_read(&nr_vmalloc_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Look up the first VA which satisfies addr < va_end, NULL if none. */
|
||||||
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
|
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
|
||||||
{
|
{
|
||||||
struct vmap_area *va = NULL;
|
struct vmap_area *va = NULL;
|
||||||
|
@ -929,7 +930,7 @@ link_va(struct vmap_area *va, struct rb_root *root,
|
||||||
* Some explanation here. Just perform simple insertion
|
* Some explanation here. Just perform simple insertion
|
||||||
* to the tree. We do not set va->subtree_max_size to
|
* to the tree. We do not set va->subtree_max_size to
|
||||||
* its current size before calling rb_insert_augmented().
|
* its current size before calling rb_insert_augmented().
|
||||||
* It is because of we populate the tree from the bottom
|
* It is because we populate the tree from the bottom
|
||||||
* to parent levels when the node _is_ in the tree.
|
* to parent levels when the node _is_ in the tree.
|
||||||
*
|
*
|
||||||
* Therefore we set subtree_max_size to zero after insertion,
|
* Therefore we set subtree_max_size to zero after insertion,
|
||||||
|
@ -1655,7 +1656,7 @@ static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Serialize vmap purging. There is no actual critical section protected
|
* Serialize vmap purging. There is no actual critical section protected
|
||||||
* by this look, but we want to avoid concurrent calls for performance
|
* by this lock, but we want to avoid concurrent calls for performance
|
||||||
* reasons and to make the pcpu_get_vm_areas more deterministic.
|
* reasons and to make the pcpu_get_vm_areas more deterministic.
|
||||||
*/
|
*/
|
||||||
static DEFINE_MUTEX(vmap_purge_lock);
|
static DEFINE_MUTEX(vmap_purge_lock);
|
||||||
|
|
Loading…
Reference in New Issue