mm/hmm: use a structure for update callback parameters
Use a structure to gather all the parameters for the update callback. This makes it easier to add new parameters by avoiding having to update all callback function signatures. The hmm_update structure is always associated with a mmu_notifier callback, so we are not planning on grouping multiple updates together. Nor do we care about page size for the range, as the range will always fully cover the page being invalidated (this is a mmu_notifier property). Link: http://lkml.kernel.org/r/20181019160442.18723-6-jglisse@redhat.com Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Cc: Ralph Campbell <rcampbell@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
d08faca018
commit
44532d4c59
|
@ -274,13 +274,28 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
|
||||||
struct hmm_mirror;
|
struct hmm_mirror;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* enum hmm_update_type - type of update
|
* enum hmm_update_event - type of update
|
||||||
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
|
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
|
||||||
*/
|
*/
|
||||||
enum hmm_update_type {
|
enum hmm_update_event {
|
||||||
HMM_UPDATE_INVALIDATE,
|
HMM_UPDATE_INVALIDATE,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* struct hmm_update - HMM update informations for callback
|
||||||
|
*
|
||||||
|
* @start: virtual start address of the range to update
|
||||||
|
* @end: virtual end address of the range to update
|
||||||
|
* @event: event triggering the update (what is happening)
|
||||||
|
* @blockable: can the callback block/sleep ?
|
||||||
|
*/
|
||||||
|
struct hmm_update {
|
||||||
|
unsigned long start;
|
||||||
|
unsigned long end;
|
||||||
|
enum hmm_update_event event;
|
||||||
|
bool blockable;
|
||||||
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* struct hmm_mirror_ops - HMM mirror device operations callback
|
* struct hmm_mirror_ops - HMM mirror device operations callback
|
||||||
*
|
*
|
||||||
|
@ -300,9 +315,9 @@ struct hmm_mirror_ops {
|
||||||
/* sync_cpu_device_pagetables() - synchronize page tables
|
/* sync_cpu_device_pagetables() - synchronize page tables
|
||||||
*
|
*
|
||||||
* @mirror: pointer to struct hmm_mirror
|
* @mirror: pointer to struct hmm_mirror
|
||||||
* @update_type: type of update that occurred to the CPU page table
|
* @update: update informations (see struct hmm_update)
|
||||||
* @start: virtual start address of the range to update
|
* Returns: -EAGAIN if update.blockable false and callback need to
|
||||||
* @end: virtual end address of the range to update
|
* block, 0 otherwise.
|
||||||
*
|
*
|
||||||
* This callback ultimately originates from mmu_notifiers when the CPU
|
* This callback ultimately originates from mmu_notifiers when the CPU
|
||||||
* page table is updated. The device driver must update its page table
|
* page table is updated. The device driver must update its page table
|
||||||
|
@ -313,10 +328,8 @@ struct hmm_mirror_ops {
|
||||||
* page tables are completely updated (TLBs flushed, etc); this is a
|
* page tables are completely updated (TLBs flushed, etc); this is a
|
||||||
* synchronous call.
|
* synchronous call.
|
||||||
*/
|
*/
|
||||||
void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
|
int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
|
||||||
enum hmm_update_type update_type,
|
const struct hmm_update *update);
|
||||||
unsigned long start,
|
|
||||||
unsigned long end);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
33
mm/hmm.c
33
mm/hmm.c
|
@ -126,10 +126,8 @@ void hmm_mm_destroy(struct mm_struct *mm)
|
||||||
kfree(mm->hmm);
|
kfree(mm->hmm);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hmm_invalidate_range(struct hmm *hmm,
|
static int hmm_invalidate_range(struct hmm *hmm,
|
||||||
enum hmm_update_type action,
|
const struct hmm_update *update)
|
||||||
unsigned long start,
|
|
||||||
unsigned long end)
|
|
||||||
{
|
{
|
||||||
struct hmm_mirror *mirror;
|
struct hmm_mirror *mirror;
|
||||||
struct hmm_range *range;
|
struct hmm_range *range;
|
||||||
|
@ -138,22 +136,30 @@ static void hmm_invalidate_range(struct hmm *hmm,
|
||||||
list_for_each_entry(range, &hmm->ranges, list) {
|
list_for_each_entry(range, &hmm->ranges, list) {
|
||||||
unsigned long addr, idx, npages;
|
unsigned long addr, idx, npages;
|
||||||
|
|
||||||
if (end < range->start || start >= range->end)
|
if (update->end < range->start || update->start >= range->end)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
range->valid = false;
|
range->valid = false;
|
||||||
addr = max(start, range->start);
|
addr = max(update->start, range->start);
|
||||||
idx = (addr - range->start) >> PAGE_SHIFT;
|
idx = (addr - range->start) >> PAGE_SHIFT;
|
||||||
npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
|
npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
|
||||||
memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
|
memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
|
||||||
}
|
}
|
||||||
spin_unlock(&hmm->lock);
|
spin_unlock(&hmm->lock);
|
||||||
|
|
||||||
down_read(&hmm->mirrors_sem);
|
down_read(&hmm->mirrors_sem);
|
||||||
list_for_each_entry(mirror, &hmm->mirrors, list)
|
list_for_each_entry(mirror, &hmm->mirrors, list) {
|
||||||
mirror->ops->sync_cpu_device_pagetables(mirror, action,
|
int ret;
|
||||||
start, end);
|
|
||||||
|
ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
|
||||||
|
if (!update->blockable && ret == -EAGAIN) {
|
||||||
|
up_read(&hmm->mirrors_sem);
|
||||||
|
return -EAGAIN;
|
||||||
|
}
|
||||||
|
}
|
||||||
up_read(&hmm->mirrors_sem);
|
up_read(&hmm->mirrors_sem);
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||||
|
@ -202,11 +208,16 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
|
||||||
unsigned long start,
|
unsigned long start,
|
||||||
unsigned long end)
|
unsigned long end)
|
||||||
{
|
{
|
||||||
|
struct hmm_update update;
|
||||||
struct hmm *hmm = mm->hmm;
|
struct hmm *hmm = mm->hmm;
|
||||||
|
|
||||||
VM_BUG_ON(!hmm);
|
VM_BUG_ON(!hmm);
|
||||||
|
|
||||||
hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
|
update.start = start;
|
||||||
|
update.end = end;
|
||||||
|
update.event = HMM_UPDATE_INVALIDATE;
|
||||||
|
update.blockable = true;
|
||||||
|
hmm_invalidate_range(hmm, &update);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
|
static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
|
||||||
|
|
Loading…
Reference in New Issue