mm/hmm: invalidate device page table at start of invalidation
Invalidate the device page table at the start of an invalidation, and invalidate any in-progress CPU page table snapshotting at both the start and the end of the invalidation. This is needed when the device has to dirty pages because its page table reports them as dirty: dirtying pages must happen in the start mmu notifier callback, not in the end one.

Link: http://lkml.kernel.org/r/20181019160442.18723-7-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
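To illustrate what the start-time invalidation enables, here is a minimal, hypothetical driver-side sketch of the mirror callback that hmm_invalidate_range() reaches (assuming the hmm_mirror_ops / struct hmm_update interface used in this series). Because the callback now runs from invalidate_range_start(), the driver can still push the device page table's dirty bits into struct page before the CPU mapping is torn down. my_mirror, my_device_unmap_one() and their dirty tracking are made-up names, not part of this patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/hmm.h>

/* Hypothetical per-device mirror wrapping the HMM mirror. */
struct my_mirror {
	struct hmm_mirror mirror;
	/* device page table state would live here */
};

/* Stub standing in for the driver's real device page table teardown:
 * unmap one device PTE, return the page it mapped (if any) and whether
 * the device had dirtied it. */
static struct page *my_device_unmap_one(struct my_mirror *mmir,
					unsigned long addr, bool *dirty)
{
	*dirty = false;
	return NULL;
}

/*
 * Called (indirectly) from hmm_invalidate_range(hmm, true, &update) in the
 * invalidate_range_start() path: tear down the device page table for the
 * range and transfer any device-side dirty bits to struct page on the way.
 */
static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_mirror *mmir = container_of(mirror, struct my_mirror, mirror);
	unsigned long addr;

	if (!update->blockable)
		return -EAGAIN;	/* caller cannot sleep, ask it to retry */

	for (addr = update->start; addr < update->end; addr += PAGE_SIZE) {
		struct page *page;
		bool dirty;

		page = my_device_unmap_one(mmir, addr, &dirty);
		if (page && dirty)
			set_page_dirty(page);
	}
	return 0;
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
};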
parent 44532d4c59
commit ec131b2d7f

 mm/hmm.c | 27
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -43,7 +43,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
  *
  * @mm: mm struct this HMM struct is bound to
  * @lock: lock protecting ranges list
- * @sequence: we track updates to the CPU page table with a sequence number
  * @ranges: list of range being snapshotted
  * @mirrors: list of mirrors for this mm
  * @mmu_notifier: mmu notifier to track updates to CPU page table
@@ -52,7 +51,6 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
 struct hmm {
 	struct mm_struct *mm;
 	spinlock_t lock;
-	atomic_t sequence;
 	struct list_head ranges;
 	struct list_head mirrors;
 	struct mmu_notifier mmu_notifier;
@@ -85,7 +83,6 @@ static struct hmm *hmm_register(struct mm_struct *mm)
 		return NULL;
 	INIT_LIST_HEAD(&hmm->mirrors);
 	init_rwsem(&hmm->mirrors_sem);
-	atomic_set(&hmm->sequence, 0);
 	hmm->mmu_notifier.ops = NULL;
 	INIT_LIST_HEAD(&hmm->ranges);
 	spin_lock_init(&hmm->lock);
@@ -126,7 +123,7 @@ void hmm_mm_destroy(struct mm_struct *mm)
 	kfree(mm->hmm);
 }
 
-static int hmm_invalidate_range(struct hmm *hmm,
+static int hmm_invalidate_range(struct hmm *hmm, bool device,
 				const struct hmm_update *update)
 {
 	struct hmm_mirror *mirror;
@@ -147,6 +144,9 @@ static int hmm_invalidate_range(struct hmm *hmm,
 	}
 	spin_unlock(&hmm->lock);
 
+	if (!device)
+		return 0;
+
 	down_read(&hmm->mirrors_sem);
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
 		int ret;
@@ -189,18 +189,21 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 				      struct mm_struct *mm,
 				      unsigned long start,
 				      unsigned long end,
 				      bool blockable)
 {
+	struct hmm_update update;
 	struct hmm *hmm = mm->hmm;
 
 	VM_BUG_ON(!hmm);
 
-	atomic_inc(&hmm->sequence);
-
-	return 0;
+	update.start = start;
+	update.end = end;
+	update.event = HMM_UPDATE_INVALIDATE;
+	update.blockable = blockable;
+	return hmm_invalidate_range(hmm, true, &update);
 }
 
 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
@@ -217,7 +220,7 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 	update.end = end;
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = true;
-	hmm_invalidate_range(hmm, &update);
+	hmm_invalidate_range(hmm, false, &update);
 }
 
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
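For context, the notifier ops table named in the last context line is what routes these callbacks: invalidate_range_start() now reaches hmm_invalidate_range() with device == true, so device page tables (and their dirty bits) are handled up front, while invalidate_range_end() passes device == false and only finishes the CPU page table snapshot invalidation. In the same file it looks roughly like this (unchanged by this patch):

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};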