mm/page_vma_mapped: reformat map_pte() with less indentation
No functional change here, but adjust the format of map_pte() so that the
following commit will be easier to read: separate out the PVMW_SYNC case
first, and remove two levels of indentation from the ZONE_DEVICE case.

Link: https://lkml.kernel.org/r/bf723f59-e3fc-6839-1cc3-c0631ee248bc@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:

parent 45fe85e981
commit 90f43b0a13
mm/page_vma_mapped.c
@@ -15,38 +15,41 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 
 static bool map_pte(struct page_vma_mapped_walk *pvmw)
 {
-	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
-	if (!(pvmw->flags & PVMW_SYNC)) {
-		if (pvmw->flags & PVMW_MIGRATION) {
-			if (!is_swap_pte(*pvmw->pte))
-				return false;
-		} else {
-			/*
-			 * We get here when we are trying to unmap a private
-			 * device page from the process address space. Such
-			 * page is not CPU accessible and thus is mapped as
-			 * a special swap entry, nonetheless it still does
-			 * count as a valid regular mapping for the page (and
-			 * is accounted as such in page maps count).
-			 *
-			 * So handle this special case as if it was a normal
-			 * page mapping ie lock CPU page table and returns
-			 * true.
-			 *
-			 * For more details on device private memory see HMM
-			 * (include/linux/hmm.h or mm/hmm.c).
-			 */
-			if (is_swap_pte(*pvmw->pte)) {
-				swp_entry_t entry;
-
-				/* Handle un-addressable ZONE_DEVICE memory */
-				entry = pte_to_swp_entry(*pvmw->pte);
-				if (!is_device_private_entry(entry) &&
-				    !is_device_exclusive_entry(entry))
-					return false;
-			} else if (!pte_present(*pvmw->pte))
-				return false;
-		}
-	}
+	if (pvmw->flags & PVMW_SYNC) {
+		/* Use the stricter lookup */
+		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
+						pvmw->address, &pvmw->ptl);
+		return true;
+	}
+
+	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
+	if (pvmw->flags & PVMW_MIGRATION) {
+		if (!is_swap_pte(*pvmw->pte))
+			return false;
+	} else if (is_swap_pte(*pvmw->pte)) {
+		swp_entry_t entry;
+		/*
+		 * Handle un-addressable ZONE_DEVICE memory.
+		 *
+		 * We get here when we are trying to unmap a private
+		 * device page from the process address space. Such
+		 * page is not CPU accessible and thus is mapped as
+		 * a special swap entry, nonetheless it still does
+		 * count as a valid regular mapping for the page
+		 * (and is accounted as such in page maps count).
+		 *
+		 * So handle this special case as if it was a normal
+		 * page mapping ie lock CPU page table and return true.
+		 *
+		 * For more details on device private memory see HMM
+		 * (include/linux/hmm.h or mm/hmm.c).
+		 */
+		entry = pte_to_swp_entry(*pvmw->pte);
+		if (!is_device_private_entry(entry) &&
+		    !is_device_exclusive_entry(entry))
+			return false;
+	} else if (!pte_present(*pvmw->pte)) {
+		return false;
+	}
 	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
 	spin_lock(pvmw->ptl);
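For reference, here is map_pte() as it reads after this patch, reassembled
from the hunk above. The trailing "return true;" and closing brace fall
outside the hunk's context, so they are assumed here rather than shown by
the diff; the comments marking the two lookup paths are added for this
sketch and are not in the source.

	static bool map_pte(struct page_vma_mapped_walk *pvmw)
	{
		if (pvmw->flags & PVMW_SYNC) {
			/*
			 * Stricter path: pte_offset_map_lock() maps the pte
			 * and takes the page-table spinlock in one step,
			 * returning the lock in pvmw->ptl for the caller
			 * to drop later.
			 */
			pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
							pvmw->address, &pvmw->ptl);
			return true;
		}

		pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else if (is_swap_pte(*pvmw->pte)) {
			swp_entry_t entry;
			/* Handle un-addressable ZONE_DEVICE memory (see hunk) */
			entry = pte_to_swp_entry(*pvmw->pte);
			if (!is_device_private_entry(entry) &&
			    !is_device_exclusive_entry(entry))
				return false;
		} else if (!pte_present(*pvmw->pte)) {
			return false;
		}
		/*
		 * Non-PVMW_SYNC path: map first, then look up and take the
		 * page-table lock as a separate step.
		 */
		pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
		spin_lock(pvmw->ptl);
		return true;	/* assumed unchanged context after the hunk */
	}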