mm: introduce page_size()
Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate places
to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a50b854e07
parent 1f18b29669
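The new helper is a one-liner over compound_order(). For illustration, here is a minimal self-contained userspace sketch of the idea; struct page, compound_order(), and the PAGE_SIZE value below are simplified stand-ins for this example, not the kernel's definitions:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* illustrative; the real value is per-architecture */

    /* Stand-in for the kernel's struct page: only the compound order is modeled. */
    struct page {
            unsigned int order;
    };

    /* Stand-in for the kernel's compound_order(): 0 for an ordinary base page. */
    static inline unsigned int compound_order(struct page *page)
    {
            return page->order;
    }

    /* Returns the number of bytes in this potentially compound page. */
    static inline unsigned long page_size(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);
    }

    int main(void)
    {
            struct page base = { .order = 0 };      /* ordinary 4 KiB page */
            struct page huge = { .order = 9 };      /* 2 MiB hugepage (order 9) */

            printf("%lu\n", page_size(&base));      /* prints 4096 */
            printf("%lu\n", page_size(&huge));      /* prints 2097152 */
            return 0;
    }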
arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ new_buf:
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
 							   __GFP_NORETRY,
 							   order);
 				if (page)
-					pg_size <<=
-						compound_order(page);
+					pg_size <<= order;
 			}
 			if (!page) {
 				page = alloc_page(gfp);
drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
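Note: compound_order() returns 0 for a page that is not the head of a compound page, so page_size() degrades to plain PAGE_SIZE for ordinary pages. That is why the call sites converted in this patch can use it unconditionally, without a PageCompound() check.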
lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 /**
mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
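Note: passing the page instead of a precomputed order lets setup_page_debug() derive the poison length itself via page_size(page); the matching stub in the next hunk and the allocate_slab() hunks further down, which drop the local order variable, follow from this signature change.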
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;