mm: simplify freeing of devmap managed pages
Make put_devmap_managed_page() return whether it took charge of the page,
and remove the separate page_is_devmap_managed() helper.

Link: https://lkml.kernel.org/r/20220210072828.2930359-6-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: "Sierra Guiza, Alejandro (Alex)" <alex.sierra@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent 75e55d8a10
commit 895749455f
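After this change, put_devmap_managed_page() both tests whether a page is
devmap managed and, if so, drops the reference itself, returning true so the
caller skips the normal put path. A minimal userspace sketch of that calling
convention (the model_* names and the bool parameter are invented for
illustration; only the control flow mirrors the patched kernel code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for the patched put_devmap_managed_page(): it returns true
 * only when it consumed the reference itself, so the caller must not
 * touch the page again.  The bool parameter replaces the real pgmap
 * type checks for the purposes of this sketch.
 */
static bool model_put_devmap_managed(bool devmap_managed)
{
	if (!devmap_managed)
		return false;	/* not ours: caller does a normal put */
	puts("devmap path dropped the reference");
	return true;		/* reference consumed */
}

/* Mirrors the control flow of the patched put_page(). */
static void model_put_page(bool devmap_managed)
{
	if (model_put_devmap_managed(devmap_managed))
		return;
	puts("folio_put(): ordinary refcounting");
}

int main(void)
{
	model_put_page(true);	/* devmap managed page */
	model_put_page(false);	/* ordinary page */
	return 0;
}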
include/linux/mm.h
@@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page)
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page(struct page *page);
+static inline bool put_devmap_managed_page(struct page *page)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
 	if (!is_zone_device_page(page))
 		return false;
-	switch (page->pgmap->type) {
-	case MEMORY_DEVICE_PRIVATE:
-	case MEMORY_DEVICE_FS_DAX:
-		return true;
-	default:
-		break;
-	}
-	return false;
+	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
+	    page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+		return false;
+	return __put_devmap_managed_page(page);
 }
 
-void put_devmap_managed_page(struct page *page);
-
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
 {
 	return false;
 }
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
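The structure that results: the cheap static-key and pgmap type checks stay in
the inline fast path, while the actual reference drop moves out of line into
__put_devmap_managed_page(), which the header only declares.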
@@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page)
 	struct folio *folio = page_folio(page);
 
 	/*
-	 * For devmap managed pages we need to catch refcount transition from
-	 * 2 to 1, when refcount reach one it means the page is free and we
-	 * need to inform the device driver through callback. See
-	 * include/linux/memremap.h and HMM for details.
+	 * For some devmap managed pages we need to catch refcount transition
+	 * from 2 to 1:
 	 */
-	if (page_is_devmap_managed(&folio->page)) {
-		put_devmap_managed_page(&folio->page);
+	if (put_devmap_managed_page(&folio->page))
 		return;
-	}
-
 	folio_put(folio);
 }
 
mm/memremap.c
@@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page)
 	page->pgmap->ops->page_free(page);
 }
 
-void put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page(struct page *page)
 {
-	int count;
-
-	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
-		return;
-
-	count = page_ref_dec_return(page);
-
 	/*
 	 * devmap page refcounts are 1-based, rather than 0-based: if
 	 * refcount is 1, then the page is free and the refcount is
 	 * stable because nobody holds a reference on the page.
 	 */
-	if (count == 1)
+	switch (page_ref_dec_return(page)) {
+	case 1:
 		free_devmap_managed_page(page);
-	else if (!count)
+		break;
+	case 0:
 		__put_page(page);
+		break;
+	}
+	return true;
 }
-EXPORT_SYMBOL(put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
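The comment above describes the 1-based devmap refcount: a drop from 2 to 1
means the last real user is gone, so the driver's page_free callback runs; a
drop from 1 to 0 is the final teardown via __put_page(). A self-contained
userspace model of those transitions (struct fake_page and the printed
function names are stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* A plain int stands in for the kernel's atomic page refcount. */
struct fake_page {
	int refcount;
};

/* Models the switch in __put_devmap_managed_page(). */
static bool model_put_devmap_managed_page(struct fake_page *page)
{
	switch (--page->refcount) {
	case 1:	/* last real user gone: hand the page back to the driver */
		puts("free_devmap_managed_page()");
		break;
	case 0:	/* final reference dropped: free the page itself */
		puts("__put_page()");
		break;
	}
	return true;	/* this path always takes charge of the put */
}

int main(void)
{
	struct fake_page page = { .refcount = 2 };

	model_put_devmap_managed_page(&page);	/* 2 -> 1 */
	model_put_devmap_managed_page(&page);	/* 1 -> 0 */
	return 0;
}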
mm/swap.c
@@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr)
 				unlock_page_lruvec_irqrestore(lruvec, flags);
 				lruvec = NULL;
 			}
-			/*
-			 * ZONE_DEVICE pages that return 'false' from
-			 * page_is_devmap_managed() do not require special
-			 * processing, and instead, expect a call to
-			 * put_page_testzero().
-			 */
-			if (page_is_devmap_managed(page)) {
-				put_devmap_managed_page(page);
+			if (put_devmap_managed_page(page))
 				continue;
-			}
 			if (put_page_testzero(page))
 				put_dev_pagemap(page->pgmap);
 			continue;
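The release_pages() simplification mirrors put_page(): a false return from
put_devmap_managed_page() already means "no special processing needed", so the
code can fall straight through to the put_page_testzero() path and the old
explanatory comment becomes redundant.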