s390/mm: use memset64 instead of clear_table
Use memset64 instead of the (now) open-coded variant clear_table. Performance-wise there is no difference.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 0b77d6701c
commit 41879ff65d
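For context, memset64() is the helper declared in <linux/string.h> (with a generic fallback in lib/string.c) that stores a 64-bit value into a given number of 64-bit slots; unlike the removed clear_table(), its third argument counts elements, not bytes. Below is a minimal userspace sketch of those semantics, not the kernel's optimized implementation:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of memset64() semantics: write the 64-bit value v into count
 * consecutive 64-bit slots starting at s (count is an element count). */
static void *memset64_sketch(uint64_t *s, uint64_t v, size_t count)
{
	uint64_t *xs = s;

	while (count--)
		*xs++ = v;
	return s;
}

int main(void)
{
	uint64_t table[8];

	/* Fill all eight entries with an arbitrary marker value. */
	memset64_sketch(table, 0x400, 8);
	printf("table[0]=%#llx table[7]=%#llx\n",
	       (unsigned long long)table[0], (unsigned long long)table[7]);
	return 0;
}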
arch/s390/include/asm/pgalloc.h

@@ -12,6 +12,7 @@
 #define _S390_PGALLOC_H
 
 #include <linux/threads.h>
+#include <linux/string.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
@@ -27,24 +28,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 void page_table_free_pgste(struct page *page);
 extern int page_table_allocate_pgste;
 
-static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
-{
-	struct addrtype { char _[256]; };
-	int i;
-
-	for (i = 0; i < n; i += 256) {
-		*s = val;
-		asm volatile(
-			"mvc	8(248,%[s]),0(%[s])\n"
-			: "+m" (*(struct addrtype *) s)
-			: [s] "a" (s));
-		s += 256 / sizeof(long);
-	}
-}
-
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	clear_table(crst, entry, _CRST_TABLE_SIZE);
+	memset64((u64 *)crst, entry, _CRST_ENTRIES);
 }
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
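The deleted clear_table() stored the 8-byte value into the first entry of each 256-byte block and then used mvc to replicate it through the rest of the block, with its length argument n given in bytes. Stripped of the s390 assembly, its effect is simply "fill n/8 64-bit entries with val", which is why the call above converts from clear_table(crst, entry, _CRST_TABLE_SIZE) to memset64((u64 *)crst, entry, _CRST_ENTRIES). A hedged, portable sketch of that equivalence; the CRST constants are assumed here (taking _CRST_TABLE_SIZE to be _CRST_ENTRIES * 8), not taken from this diff:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <assert.h>

/* Portable rendering of the removed clear_table(): fill n bytes
 * (a multiple of 8; the s390 original worked in 256-byte blocks)
 * with the 8-byte pattern val. */
static void clear_table_portable(uint64_t *s, uint64_t val, size_t n)
{
	size_t i;

	for (i = 0; i < n / sizeof(uint64_t); i++)
		s[i] = val;
}

/* memset64()-style helper: count is in 64-bit elements, not bytes. */
static void memset64_sketch(uint64_t *s, uint64_t v, size_t count)
{
	while (count--)
		*s++ = v;
}

int main(void)
{
	/* Assumed s390 values, for illustration only. */
	enum { CRST_ENTRIES = 2048, CRST_TABLE_SIZE = CRST_ENTRIES * 8 };
	static uint64_t a[CRST_ENTRIES], b[CRST_ENTRIES];

	clear_table_portable(a, 0x20, CRST_TABLE_SIZE);	/* old: byte count    */
	memset64_sketch(b, 0x20, CRST_ENTRIES);		/* new: element count */

	assert(memcmp(a, b, sizeof(a)) == 0);
	return 0;
}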
arch/s390/kernel/vdso.c

@@ -166,10 +166,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
 	vd->node_id = cpu_to_node(vd->cpu_nr);
 
 	/* Set up access register mode page table */
-	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
-		    PAGE_SIZE << SEGMENT_ORDER);
-	clear_table((unsigned long *) page_table, _PAGE_INVALID,
-		    256*sizeof(unsigned long));
+	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
+	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
 
 	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
 	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
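In vdso_alloc_per_cpu() the literal byte counts are replaced by symbolic entry counts. Under the usual s390 definitions (assumed below for the check, not taken from this diff: PAGE_SIZE 4096, SEGMENT_ORDER 2, _CRST_ENTRIES 2048, PTRS_PER_PTE 256, 8-byte unsigned long), the number of bytes written is unchanged:

#include <stdint.h>

/* Assumed s390 constants, listed only to check the arithmetic. */
#define PAGE_SIZE	4096UL
#define SEGMENT_ORDER	2
#define _CRST_ENTRIES	2048
#define PTRS_PER_PTE	256

/* Old: clear_table(segment_table, ..., PAGE_SIZE << SEGMENT_ORDER)  -- bytes
 * New: memset64(segment_table, ..., _CRST_ENTRIES)                  -- u64s */
_Static_assert((PAGE_SIZE << SEGMENT_ORDER) == _CRST_ENTRIES * sizeof(uint64_t),
	       "segment table: same number of bytes cleared");

/* Old: clear_table(page_table, ..., 256 * sizeof(unsigned long))    -- bytes
 * New: memset64(page_table, ..., PTRS_PER_PTE)                      -- u64s */
_Static_assert(256 * sizeof(uint64_t) == PTRS_PER_PTE * sizeof(uint64_t),
	       "page table: same number of bytes cleared");

int main(void) { return 0; }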
arch/s390/mm/pgalloc.c

@@ -158,13 +158,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 struct page *page_table_alloc_pgste(struct mm_struct *mm)
 {
 	struct page *page;
-	unsigned long *table;
+	u64 *table;
 
 	page = alloc_page(GFP_KERNEL);
 	if (page) {
-		table = (unsigned long *) page_to_phys(page);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+		table = (u64 *)page_to_phys(page);
+		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	}
 	return page;
 }
@@ -221,12 +221,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
 		atomic_set(&page->_mapcount, 3);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
-		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
 		spin_unlock_bh(&mm->context.lock);
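Both pgalloc.c hunks rely on the same layout: an s390 page table has PTRS_PER_PTE (256) eight-byte entries, i.e. 2 KB, so a 4 KB page holds two such fragments (in the PGSTE case the page table sits in the first half and the PGSTE area in the second). That is why PAGE_SIZE/2 bytes becomes PTRS_PER_PTE u64 entries and a full page becomes 2 * PTRS_PER_PTE. A small illustrative sketch of that layout, with the constants assumed rather than taken from this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed s390 values, for illustration. */
#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	256

int main(void)
{
	/* One 4K page, viewed as two 2K page-table fragments of u64s. */
	static uint64_t page[PAGE_SIZE / sizeof(uint64_t)];
	uint64_t *pgtable = page;			/* first 2K fragment  */
	uint64_t *pgste = page + PTRS_PER_PTE;		/* second 2K fragment */
	size_t i;

	/* Mirrors the page_table_alloc_pgste() case: pte slots filled with a
	 * stand-in "invalid" marker, the PGSTE half zeroed. */
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pgtable[i] = 0x400;
		pgste[i] = 0;
	}

	printf("fragment: %zu bytes, PAGE_SIZE/2 = %lu bytes\n",
	       PTRS_PER_PTE * sizeof(uint64_t), PAGE_SIZE / 2);
	return 0;
}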
arch/s390/mm/vmem.c

@@ -59,7 +59,7 @@ pte_t __ref *vmem_pte_alloc(void)
 		pte = (pte_t *) memblock_alloc(size, size);
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
+	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 	return pte;
 }
 
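The vmem_pte_alloc() conversion follows the same rule: size is PTRS_PER_PTE * sizeof(pte_t) bytes, and since a pte_t on s390 is a single 8-byte entry (assumed below), that byte count corresponds exactly to PTRS_PER_PTE u64 elements for memset64():

#include <stdint.h>

/* Assumed s390 definitions, for this check only. */
typedef struct { uint64_t pte; } pte_t;
#define PTRS_PER_PTE	256

/* Old: clear_table(pte, _PAGE_INVALID, size), size in bytes.
 * New: memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE), count in u64s. */
_Static_assert(PTRS_PER_PTE * sizeof(pte_t) == PTRS_PER_PTE * sizeof(uint64_t),
	       "same region size either way");

int main(void) { return 0; }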