mm: use is_cow_mapping() across tree where proper
Now that is_cow_mapping() is exported in mm.h, replace the open-coded checks elsewhere throughout the tree and use the new helper instead.

Link: https://lkml.kernel.org/r/20210217233547.93892-5-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@ziepe.ca>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Roland Scheidegger <sroland@vmware.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Gal Pressman <galpress@amazon.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Kirill Shutemov <kirill@shutemov.name>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Wei Zhang <wzam@amazon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ca6eb14d64 (parent 97a7e4733b)
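For context, the helper adopted throughout the hunks below is the one exported in include/linux/mm.h by the preceding patch in this series. A minimal sketch of it (the exact parameter type in mm.h is assumed here) is:

/*
 * Sketch of the exported helper, assuming a vm_flags_t parameter:
 * a mapping is copy-on-write when it is private (no VM_SHARED) yet
 * may still be made writable (VM_MAYWRITE).
 */
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}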
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c

@@ -500,8 +500,6 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 	vm_fault_t ret;
 	pgoff_t fault_page_size;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool is_cow_mapping =
-		(vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
 	switch (pe_size) {
 	case PE_SIZE_PMD:
@@ -518,7 +516,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 	}
 
 	/* Always do write dirty-tracking and COW on PTE level. */
-	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags)))
 		return VM_FAULT_FALLBACK;
 
 	ret = ttm_bo_vm_reserve(bo, vmf);
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c

@@ -49,7 +49,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = &vmw_vm_ops;
 
 	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
-	if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+	if (!is_cow_mapping(vma->vm_flags))
 		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
 
 	return 0;
fs/proc/task_mmu.c

@@ -1036,8 +1036,6 @@ struct clear_refs_private {
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
 
-#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
-
 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
 	struct page *page;
mm/hugetlb.c

@@ -3734,15 +3734,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	pte_t *src_pte, *dst_pte, entry, dst_entry;
 	struct page *ptepage;
 	unsigned long addr;
-	int cow;
+	bool cow = is_cow_mapping(vma->vm_flags);
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct mmu_notifier_range range;
 	int ret = 0;
 
-	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
-
 	if (cow) {
 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
 					vma->vm_start,