drm ttm/mm changes for 5.6-rc1
Merge tag 'drm-next-2020-02-04' of git://anongit.freedesktop.org/drm/drm

Pull drm ttm/mm updates from Dave Airlie:
 "Thomas Hellstrom has some more changes to the TTM layer that needed a
  patch to the mm subsystem. This adds a new mm API
  vmf_insert_mixed_prot to avoid an ugly hack that has limitations in
  the TTM layer"

* tag 'drm-next-2020-02-04' of git://anongit.freedesktop.org/drm/drm:
  mm, drm/ttm: Fix vm page protection handling
  mm: Add a vmf_insert_mixed_prot() function
commit 9717c1cea1
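From a caller's perspective, the new API lets a fault handler pass page protection explicitly instead of faking it through a modified VMA. A minimal sketch of the intended call pattern, assuming a hypothetical driver fault handler (my_dev_pfn() and my_dev_prot() are invented helpers; only the vmf_insert_*_prot() signatures come from this merge):

	static vm_fault_t my_dev_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		unsigned long pfn = my_dev_pfn(vma, vmf->address);	/* hypothetical */
		/* Caching/encryption bits chosen per fault, not at mmap() time. */
		pgprot_t prot = my_dev_prot(vma->vm_page_prot);		/* hypothetical */

		if (vma->vm_flags & VM_MIXEDMAP)
			return vmf_insert_mixed_prot(vma, vmf->address,
						     __pfn_to_pfn_t(pfn, PFN_DEV),
						     prot);
		return vmf_insert_pfn_prot(vma, vmf->address, pfn, prot);
	}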
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -179,7 +179,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 				    pgoff_t num_prefault)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct vm_area_struct cvma = *vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
 	unsigned long page_offset;
@@ -250,7 +249,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		goto out_io_unlock;
 	}
 
-	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+	prot = ttm_io_prot(bo->mem.placement, prot);
 	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
@@ -266,7 +265,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		}
 	} else {
 		/* Iomem should not be marked encrypted */
-		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+		prot = pgprot_decrypted(prot);
 	}
 
 	/*
@@ -289,11 +288,20 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 			pfn = page_to_pfn(page);
 		}
 
+		/*
+		 * Note that the value of @prot at this point may differ from
+		 * the value of @vma->vm_page_prot in the caching- and
+		 * encryption bits. This is because the exact location of the
+		 * data may not be known at mmap() time and may also change
+		 * at arbitrary times while the data is mmap'ed.
+		 * See vmf_insert_mixed_prot() for a discussion.
+		 */
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed(&cvma, address,
-					__pfn_to_pfn_t(pfn, PFN_DEV));
+			ret = vmf_insert_mixed_prot(vma, address,
+						    __pfn_to_pfn_t(pfn, PFN_DEV),
+						    prot);
 		else
-			ret = vmf_insert_pfn(&cvma, address, pfn);
+			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 		/* Never error on prefaulted PTEs */
 		if (unlikely((ret & VM_FAULT_ERROR))) {
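The comment added above motivates the change; the removed cvma lines show the hack it replaces. Condensed from the hunks above, with unrelated lines elided, the before and after of this fault path look like:

	/* Before: a stack copy of the whole VMA carried the modified prot. */
	struct vm_area_struct cvma = *vma;
	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
	ret = vmf_insert_mixed(&cvma, address, __pfn_to_pfn_t(pfn, PFN_DEV));

	/* After: the protection travels as an explicit argument. */
	prot = ttm_io_prot(bo->mem.placement, prot);
	ret = vmf_insert_mixed_prot(vma, address,
				    __pfn_to_pfn_t(pfn, PFN_DEV), prot);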
@@ -325,7 +333,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
-	prot = vm_get_page_prot(vma->vm_flags);
+	prot = vma->vm_page_prot;
 	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
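This last ttm_bo_vm.c hunk is easy to miss: vm_get_page_prot() recomputes the protection purely from the vm_flags bits, discarding anything a driver stored in vma->vm_page_prot at mmap() time, so the fault path now reads the stored value instead. A sketch of the distinction (the writecombine mmap is an invented example):

	/* Derived only from flags; driver-set caching bits are lost: */
	pgprot_t a = vm_get_page_prot(vma->vm_flags);

	/* Preserves bits set earlier, e.g. by a hypothetical mmap() doing:
	 *   vma->vm_page_prot =
	 *           pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	 */
	pgprot_t b = vma->vm_page_prot;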
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2535,6 +2535,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+			pfn_t pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -312,7 +312,12 @@ struct vm_area_struct {
 	/* Second cache line starts here. */
 
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
+
+	/*
+	 * Access permissions of this VMA.
+	 * See vmf_insert_mixed_prot() for discussion.
+	 */
+	pgprot_t vm_page_prot;
 	unsigned long vm_flags;		/* Flags, see mm.h. */
 
 	/*
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1664,6 +1664,9 @@ out_unlock:
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implication of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context. May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
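The "multiple VMAs is impractical" caveat in this kernel-doc is exactly TTM's situation: a single VMA maps a buffer whose pages may need different attributes at different times. A hedged sketch of such a prefault loop (page_is_io(), buf and npages are invented):

	unsigned long i;
	for (i = 0; i < npages; i++) {
		pgprot_t prot = page_is_io(buf, i) ?	/* hypothetical */
			pgprot_writecombine(vma->vm_page_prot) :
			vma->vm_page_prot;
		ret = vmf_insert_pfn_prot(vma, address + (i << PAGE_SHIFT),
					  base_pfn + i, prot);
		if (ret & VM_FAULT_ERROR)
			break;
	}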
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-		unsigned long addr, pfn_t pfn, bool mkwrite)
+		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+		bool mkwrite)
 {
-	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
 
 	BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,43 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context. May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+				 pfn_t pfn, pgprot_t pgprot)
+{
+	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+EXPORT_SYMBOL(vmf_insert_mixed_prot);
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, false);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
 
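To make the kernel-doc's "caching mode may not be known at mmap() time" point concrete: a driver can derive the bits from where the memory currently lives, in the spirit of the ttm_io_prot() call earlier in this diff. The placement flag and helper below are illustrative assumptions, not part of this patch:

	/* Hypothetical: pick caching bits per fault from current placement. */
	static pgprot_t my_placement_prot(unsigned int placement, pgprot_t base)
	{
		if (placement & MY_PLACEMENT_IO)	/* made-up flag */
			return pgprot_writecombine(base);	/* io memory: WC */
		return base;	/* system RAM: keep the cached mapping */
	}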
@@ -1797,7 +1833,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, true);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);