ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM
The kvm_seq value has nothing whatsoever to do with the other KVM (the Kernel-based Virtual Machine). Given that KVM support on ARM is imminent, it's best to rename kvm_seq to something that clearly identifies what it is about, i.e. a sequence number for vmalloc section mappings.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ce7b175656
commit 3e99675af1
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -7,7 +7,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	u64 id;
 #endif
-	unsigned int	kvm_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,7 +20,7 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
@@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	/*
 	 * Required during context switch to avoid speculative page table
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
 
 	do {
-		seq = init_mm.context.kvm_seq;
+		seq = init_mm.context.vmalloc_seq;
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
+		mm->context.vmalloc_seq = seq;
+	} while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
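The renamed machinery is a small sequence-count protocol: init_mm.context.vmalloc_seq is bumped whenever a section mapping in the vmalloc region is torn down, and any mm whose cached value is stale recopies the kernel's vmalloc PGD entries, retrying if the counter moves mid-copy. Below is a minimal user-space C sketch of that shape; the names (global_seq, master, sync_tables, switch_to_ctx) are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <string.h>

#define NENTRIES 8

static unsigned int global_seq;         /* stands in for init_mm.context.vmalloc_seq */
static unsigned long master[NENTRIES];  /* stands in for the kernel's vmalloc PGD entries */

struct ctx {
	unsigned int seq;               /* per-mm cached sequence (mm->context.vmalloc_seq) */
	unsigned long table[NENTRIES];  /* this mm's copy of the entries */
};

/* Mirrors the shape of __check_vmalloc_seq(): sample the sequence,
 * copy the live entries, and retry if a writer bumped it meanwhile. */
static void sync_tables(struct ctx *c)
{
	unsigned int seq;

	do {
		seq = global_seq;
		memcpy(c->table, master, sizeof(master));
		c->seq = seq;
	} while (seq != global_seq);
}

/* Mirrors the check_and_switch_context() fast path: one compare,
 * and a resync only when the cached sequence is stale. */
static void switch_to_ctx(struct ctx *c)
{
	if (c->seq != global_seq)
		sync_tables(c);
}

int main(void)
{
	struct ctx c = { 0 };

	master[0] = 0xdeadbeef;  /* the master entries change...                */
	global_seq++;            /* ...and the writer bumps the sequence        */
	switch_to_ctx(&c);       /* a stale context resyncs at switch time      */
	printf("seq=%u entry0=%#lx\n", c.seq, c.table[0]);
	return 0;
}

Re-checking the sequence after the copy, instead of taking a lock, keeps the common context-switch path to a single compare; the copy is only redone in the unlikely case that a teardown raced with it.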
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 		if (!pmd_none(pmd)) {
 			/*
 			 * Clear the PMD from the page table, and
-			 * increment the kvm sequence so others
+			 * increment the vmalloc sequence so others
 			 * notice this change.
 			 *
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.kvm_seq++;
+			init_mm.context.vmalloc_seq++;
 
 			/*
 			 * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
+	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+		__check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
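The two unmap_area_sections() hunks above are the writer side of the same protocol: clear the PMD, bump the sequence, then resync the active mm so use-after-iounmap gets caught. Continuing the earlier sketch, with the same illustrative names:

/* Writer side, loosely mirroring unmap_area_sections(). The clear and
 * the increment are two separate steps, which is why the kernel comment
 * above notes this is still racy on SMP machines. */
static void unmap_entry(struct ctx *active, int idx)
{
	master[idx] = 0;  /* pmd_clear() analogue                  */
	global_seq++;     /* init_mm.context.vmalloc_seq++ analogue */

	/* Keep the active context current, as the iounmap path above
	 * does, so a stale copy is not used after the unmap. */
	if (active->seq != global_seq)
		sync_tables(active);
}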