s390/mm: use _SEGMENT_ENTRY_EMPTY in the code
_SEGMENT_ENTRY_INVALID denotes the invalid bit in a segment table entry, whereas _SEGMENT_ENTRY_EMPTY means that the value of the whole entry consists only of the invalid bit, i.e. the entry is completely empty.

Therefore use _SEGMENT_ENTRY_INVALID only to check and set the invalid bit with bitwise operations, and use _SEGMENT_ENTRY_EMPTY only to check the whole entry for (un)equality.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 7775913724
commit 54397bb0bb
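To make the convention from the commit message concrete, here is a minimal user-space sketch (not kernel code): the invalid bit is set and tested with bitwise operations, while "empty" is a whole-entry value that is compared with ==/!=. The names SEG_ENTRY_INVALID and SEG_ENTRY_EMPTY, the bit value 0x20, and the helper functions are illustrative assumptions for this sketch only; the real macros are _SEGMENT_ENTRY_INVALID and _SEGMENT_ENTRY_EMPTY in arch/s390/include/asm/pgtable.h.

#include <stdio.h>

/* Illustrative stand-ins, not the kernel definitions. */
#define SEG_ENTRY_INVALID 0x20UL                /* the invalid bit */
#define SEG_ENTRY_EMPTY   (SEG_ENTRY_INVALID)   /* an entry holding only the invalid bit */

/* "_SEGMENT_ENTRY_EMPTY style": compare the whole entry for (un)equality. */
static int seg_entry_present(unsigned long entry)
{
        return entry != SEG_ENTRY_EMPTY;
}

/* "_SEGMENT_ENTRY_INVALID style": set the single bit with a bitwise operation. */
static unsigned long seg_entry_invalidate(unsigned long entry)
{
        return entry | SEG_ENTRY_INVALID;
}

int main(void)
{
        unsigned long entry = SEG_ENTRY_EMPTY;   /* completely empty entry */

        printf("empty entry present:      %d\n", seg_entry_present(entry));

        entry = 0x12340000UL;                    /* pretend a segment was mapped */
        printf("mapped entry present:     %d\n", seg_entry_present(entry));

        entry = seg_entry_invalidate(entry);     /* invalid bit set, payload bits kept */
        printf("invalidated equals empty: %d\n", entry == SEG_ENTRY_EMPTY);
        return 0;
}

The last comparison prints 0 in this sketch: an invalidated entry still carries its old payload bits, which is why checks for a completely empty entry compare against the EMPTY value rather than the INVALID bit.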
@@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-        return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
+        return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-        return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
+        return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud)
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-        pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
+        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                             unsigned long addr, pmd_t *pmdp)
 {
-        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 {
         if (full) {
                 pmd_t pmd = *pmdp;
-                *pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
+                *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
                 return pmd;
         }
-        return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+        return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                    unsigned long addr, pmd_t *pmdp)
 {
-        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
         spin_lock(&gmap->guest_table_lock);
         entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
         if (entry) {
-                flush = (*entry != _SEGMENT_ENTRY_INVALID);
-                *entry = _SEGMENT_ENTRY_INVALID;
+                flush = (*entry != _SEGMENT_ENTRY_EMPTY);
+                *entry = _SEGMENT_ENTRY_EMPTY;
         }
         spin_unlock(&gmap->guest_table_lock);
         return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
                 return rc;
         ptl = pmd_lock(mm, pmd);
         spin_lock(&gmap->guest_table_lock);
-        if (*table == _SEGMENT_ENTRY_INVALID) {
+        if (*table == _SEGMENT_ENTRY_EMPTY) {
                 rc = radix_tree_insert(&gmap->host_to_guest,
                                        vmaddr >> PMD_SHIFT, table);
                 if (!rc)
@@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte)
                 rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
                                      _SEGMENT_ENTRY_NOEXEC);
         } else
-                rste = _SEGMENT_ENTRY_INVALID;
+                rste = _SEGMENT_ENTRY_EMPTY;
         return rste;
 }