hexagon: implement the new page table range API

Add PFN_PTE_SHIFT and update_mmu_cache_range().
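
With PFN_PTE_SHIFT defined, hexagon no longer needs its private
set_pte_at(): the generic set_ptes() loop can build each successive PTE
by advancing the PFN field, and set_pte_at() becomes the nr == 1 case of
that helper.  A simplified sketch of the generic code this hooks into
(condensed from the helper added elsewhere in this series in
include/linux/pgtable.h; lazy-MMU and page-table-check hooks omitted,
details may differ):

	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		for (;;) {
			set_pte(ptep, pte);
			if (--nr == 0)
				break;
			ptep++;
			/* step the PTE to the next page frame */
			pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
		}
	}
	#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

update_mmu_cache_range() takes a struct vm_fault pointer and a page count
in addition to the old arguments; on hexagon it remains a no-op, and
update_mmu_cache() is kept as a wrapper that passes nr == 1.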

Link: https://lkml.kernel.org/r/20230802151406.3735276-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Brian Cain <bcain@quicinc.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9ff6339441, parent e724e7aaf9
Matthew Wilcox (Oracle), 2023-08-02 16:13:40 +01:00; committed by Andrew Morton
2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h

@@ -58,12 +58,16 @@ extern void flush_cache_all_hexagon(void);
  * clean the cache when the PTE is set.
  *
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
 	/* generic_ptrace_pokedata doesn't wind up here, does it? */
 }
 
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, void *src, int len);
 #define copy_to_user_page copy_to_user_page

diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h

@@ -338,6 +338,7 @@ static inline int pte_exec(pte_t pte)
 /* __swp_entry_to_pte - extract PTE from swap entry */
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
+#define PFN_PTE_SHIFT	PAGE_SHIFT
 /* pfn_pte - convert page number and protection value to page table entry */
 #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
 
@@ -345,14 +346,6 @@ static inline int pte_exec(pte_t pte)
 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
-/*
- * set_pte_at - update page table and do whatever magic may be
- * necessary to make the underlying hardware/firmware take note.
- *
- * VM may require a virtual instruction to alert the MMU.
- */
-#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
 	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);