mm: convert ptlock_alloc() to use ptdescs
This removes some direct accesses to struct page, working towards splitting out struct ptdesc from struct page.

Link: https://lkml.kernel.org/r/20230807230513.102486-6-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guo Ren <guoren@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
f8546d8494
commit
f5ecca06b3
|
@ -2826,7 +2826,7 @@ static inline void pagetable_free(struct ptdesc *pt)
|
||||||
#if USE_SPLIT_PTE_PTLOCKS
|
#if USE_SPLIT_PTE_PTLOCKS
|
||||||
#if ALLOC_SPLIT_PTLOCKS
|
#if ALLOC_SPLIT_PTLOCKS
|
||||||
void __init ptlock_cache_init(void);
|
void __init ptlock_cache_init(void);
|
||||||
extern bool ptlock_alloc(struct page *page);
|
bool ptlock_alloc(struct ptdesc *ptdesc);
|
||||||
extern void ptlock_free(struct page *page);
|
extern void ptlock_free(struct page *page);
|
||||||
|
|
||||||
static inline spinlock_t *ptlock_ptr(struct page *page)
|
static inline spinlock_t *ptlock_ptr(struct page *page)
|
||||||
|
@ -2838,7 +2838,7 @@ static inline void ptlock_cache_init(void)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool ptlock_alloc(struct page *page)
|
static inline bool ptlock_alloc(struct ptdesc *ptdesc)
|
||||||
{
|
{
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -2868,7 +2868,7 @@ static inline bool ptlock_init(struct page *page)
|
||||||
* slab code uses page->slab_cache, which share storage with page->ptl.
|
* slab code uses page->slab_cache, which share storage with page->ptl.
|
||||||
*/
|
*/
|
||||||
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
|
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
|
||||||
if (!ptlock_alloc(page))
|
if (!ptlock_alloc(page_ptdesc(page)))
|
||||||
return false;
|
return false;
|
||||||
spin_lock_init(ptlock_ptr(page));
|
spin_lock_init(ptlock_ptr(page));
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -6114,14 +6114,14 @@ void __init ptlock_cache_init(void)
|
||||||
SLAB_PANIC, NULL);
|
SLAB_PANIC, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ptlock_alloc(struct page *page)
|
bool ptlock_alloc(struct ptdesc *ptdesc)
|
||||||
{
|
{
|
||||||
spinlock_t *ptl;
|
spinlock_t *ptl;
|
||||||
|
|
||||||
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
|
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
|
||||||
if (!ptl)
|
if (!ptl)
|
||||||
return false;
|
return false;
|
||||||
page->ptl = ptl;
|
ptdesc->ptl = ptl;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue