Revert "um: support some of ARCH_HAS_SET_MEMORY"

This reverts commit 963285b0b4 ("um: support some of
ARCH_HAS_SET_MEMORY"), as it turns out that it not only doesn't
work (because um never uses the protection bits in the page
tables) but also corrupts the page tables if used on a
non-vmalloc page, since um never allocates proper page tables
for the 'physmem' in the first place.

Fixing all this will take more effort, so for now revert it.
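
The failure mode is mechanical: apply_to_page_range() allocates any
missing page-table levels for the range it walks, so pointing the
set_memory_*() helpers at a physmem address conjures up PTEs that
nothing else on UML maintains. As a rough illustration only (the name
um_addr_is_settable() is invented for this sketch, not kernel API),
any future reimplementation would at minimum have to gate on the
vmalloc area:

#include <linux/mm.h>	/* is_vmalloc_addr() */

/*
 * Hypothetical guard, not part of any kernel tree: on UML only
 * vmalloc mappings are backed by real kernel page tables, so a
 * reintroduced set_memory_*() would have to reject everything else
 * instead of letting apply_to_page_range() fabricate entries.
 */
static bool um_addr_is_settable(unsigned long addr, int numpages)
{
	unsigned long last = addr + (unsigned long)numpages * PAGE_SIZE - 1;

	return is_vmalloc_addr((void *)addr) && is_vmalloc_addr((void *)last);
}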

Reported-by: Benjamin Berg <benjamin@sipsolutions.net>
Fixes: 963285b0b4 ("um: support some of ARCH_HAS_SET_MEMORY")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
commit a31e9c4e72
parent 2fcb4090cd
Author: Johannes Berg, 2021-01-10 19:05:09 +01:00 (committed by Richard Weinberger)

4 changed files with 0 additions and 59 deletions

--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -15,7 +15,6 @@ config UML
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_BUGVERBOSE
 	select NO_DMA
-	select ARCH_HAS_SET_MEMORY
 	select GENERIC_IRQ_SHOW
 	select GENERIC_CPU_DEVICES
 	select HAVE_GCC_PLUGINS
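
Dropping the select is what keeps generic callers working after the
revert: without CONFIG_ARCH_HAS_SET_MEMORY, <linux/set_memory.h>
falls back to inline no-op stubs, roughly (abridged):

/* Sketch of the generic fallback in <linux/set_memory.h>. */
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif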

--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -55,15 +55,12 @@ extern unsigned long end_iomem;
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define __PAGE_KERNEL_EXEC \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO \
-	 (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO	__pgprot(__PAGE_KERNEL_RO)
 
 /*
  * The i386 can't do page protection for execute, and considers that the same
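
The removed __PAGE_KERNEL_RO is simply __PAGE_KERNEL_EXEC without
_PAGE_RW, and PAGE_KERNEL_RO wrapped it as a pgprot_t. A hedged
sketch of the kind of use such a pgprot exists for (nothing in
arch/um actually did this, and on UML the bits were never enforced
anyway, which is half the point of the revert):

#include <linux/vmalloc.h>

/*
 * Illustrative only: map 'count' pages read-only into kernel space
 * using the pgprot removed by this hunk.
 */
static void *map_pages_ro(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL_RO);
}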

--- a/arch/um/include/asm/set_memory.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/set_memory.h>
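
This one-line header was the arch-side opt-in; deleting it removes
the prototypes that <asm-generic/set_memory.h> supplies, which are
(paraphrased):

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);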

--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -608,57 +608,3 @@ void force_flush_all(void)
 		vma = vma->vm_next;
 	}
 }
-
-struct page_change_data {
-	unsigned int set_mask, clear_mask;
-};
-
-static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-{
-	struct page_change_data *cdata = data;
-	pte_t pte = READ_ONCE(*ptep);
-
-	pte_clear_bits(pte, cdata->clear_mask);
-	pte_set_bits(pte, cdata->set_mask);
-
-	set_pte(ptep, pte);
-	return 0;
-}
-
-static int change_memory(unsigned long start, unsigned long pages,
-			 unsigned int set_mask, unsigned int clear_mask)
-{
-	unsigned long size = pages * PAGE_SIZE;
-	struct page_change_data data;
-	int ret;
-
-	data.set_mask = set_mask;
-	data.clear_mask = clear_mask;
-
-	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-				  &data);
-
-	flush_tlb_kernel_range(start, start + size);
-
-	return ret;
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
-	return change_memory(addr, numpages, 0, _PAGE_RW);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
-	return change_memory(addr, numpages, _PAGE_RW, 0);
-}
-
-int set_memory_nx(unsigned long addr, int numpages)
-{
-	return -EOPNOTSUPP;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
-	return -EOPNOTSUPP;
-}
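
Note that the walk itself was the page-table corrupter here:
apply_to_page_range() allocates missing intermediate tables as it
goes. One hedged sketch of a safer shape, assuming the protection
bits were first made meaningful on UML, would use the kernel's
apply_to_existing_page_range(), which fails on unpopulated ranges
instead of allocating (this reuses the change_page_range() callback
and struct from the removed code above; it is a sketch, not the
eventual fix):

#include <linux/mm.h>	/* apply_to_existing_page_range() */

static int change_memory_existing(unsigned long start, unsigned long pages,
				  unsigned int set_mask,
				  unsigned int clear_mask)
{
	unsigned long size = pages * PAGE_SIZE;
	struct page_change_data data = {
		.set_mask = set_mask,
		.clear_mask = clear_mask,
	};
	int ret;

	/* Errors out on physmem rather than growing a bogus table tree. */
	ret = apply_to_existing_page_range(&init_mm, start, size,
					   change_page_range, &data);
	flush_tlb_kernel_range(start, start + size);
	return ret;
}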