s390/pageattr: do a single TLB flush for change_page_attr
The change of the access rights for an address range in the kernel address space is currently done with a loop of IPTE + a store of the modified PTE. Between the IPTE and the store the PTE will be invalid; this intermediate state can cause problems with concurrent accesses. Consider a change of a kernel area from read-write to read-only: a concurrent reader of that area should be fine, but with the invalid PTE it might get an unexpected exception.

Remove the IPTE for each PTE and do a global flush after all PTEs have been modified.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
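As a sketch of the problem, this is the pre-patch loop, reconstructed from the removed lines of the hunk below (walk_page_table() is a helper from the surrounding file); the comments mark the window the message describes:

static void change_page_attr(unsigned long addr, int numpages,
			     pte_t (*set) (pte_t))
{
	pte_t *ptep, pte;
	int i;

	for (i = 0; i < numpages; i++) {
		ptep = walk_page_table(addr);
		if (WARN_ON_ONCE(!ptep))
			break;
		pte = *ptep;
		pte = set(pte);			/* e.g. make the PTE read-only */
		__ptep_ipte(addr, ptep);	/* PTE invalid from here...    */
		*ptep = pte;			/* ...until this store; a      */
						/* concurrent read in between  */
						/* can fault unexpectedly      */
		addr += PAGE_SIZE;
	}
}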
parent 2cfc5f9ce7
commit 007ccec53d
@@ -65,19 +65,17 @@ static pte_t *walk_page_table(unsigned long addr)
 static void change_page_attr(unsigned long addr, int numpages,
 			     pte_t (*set) (pte_t))
 {
-	pte_t *ptep, pte;
+	pte_t *ptep;
 	int i;
 
 	for (i = 0; i < numpages; i++) {
		ptep = walk_page_table(addr);
 		if (WARN_ON_ONCE(!ptep))
 			break;
-		pte = *ptep;
-		pte = set(pte);
-		__ptep_ipte(addr, ptep);
-		*ptep = pte;
+		*ptep = set(*ptep);
 		addr += PAGE_SIZE;
 	}
+	__tlb_flush_kernel();
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
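For readability, change_page_attr() as it looks after the patch, reconstructed from the hunk above (__tlb_flush_kernel() is assumed from the surrounding arch code):

static void change_page_attr(unsigned long addr, int numpages,
			     pte_t (*set) (pte_t))
{
	pte_t *ptep;
	int i;

	for (i = 0; i < numpages; i++) {
		ptep = walk_page_table(addr);
		if (WARN_ON_ONCE(!ptep))
			break;
		/* store the modified PTE directly; no per-page IPTE,
		 * so the PTE never becomes invalid in between */
		*ptep = set(*ptep);
		addr += PAGE_SIZE;
	}
	/* a single global flush replaces one IPTE per page */
	__tlb_flush_kernel();
}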