HMM related patches for 5.6
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull mmu_notifier updates from Jason Gunthorpe:
 "This small series revises the names in mmu_notifier to make the code
  clearer and more readable"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  mm/mmu_notifiers: Use 'interval_sub' as the variable for mmu_interval_notifier
  mm/mmu_notifiers: Use 'subscription' as the variable name for mmu_notifier
  mm/mmu_notifier: Rename struct mmu_notifier_mm to mmu_notifier_subscriptions
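For a sense of what the rename means at a call site, here is a minimal sketch of attaching a notifier under the new naming convention. This is hypothetical driver code against the 5.6 mmu_notifier API; my_driver_ops and my_mirror_attach are illustrative names, not from the tree:

    #include <linux/mmu_notifier.h>
    #include <linux/slab.h>

    static const struct mmu_notifier_ops my_driver_ops; /* callbacks elided */

    static int my_mirror_attach(struct mm_struct *mm)
    {
            /* The local is now conventionally named 'subscription' rather
             * than 'mn', matching the renamed mm->notifier_subscriptions. */
            struct mmu_notifier *subscription;

            subscription = kzalloc(sizeof(*subscription), GFP_KERNEL);
            if (!subscription)
                    return -ENOMEM;
            subscription->ops = &my_driver_ops;

            /* Takes mmap_sem internally; use __mmu_notifier_register() if
             * the caller already holds it for write. */
            return mmu_notifier_register(subscription, mm);
    }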
commit 39bed42de2
Documentation/vm/hmm.rst:

@@ -149,14 +149,14 @@ CPU page table into a device page table; HMM helps keep both synchronized. A
 device driver that wants to mirror a process address space must start with the
 registration of a mmu_interval_notifier::
 
- mni->ops = &driver_ops;
- int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
-                                  unsigned long start, unsigned long length,
-                                  struct mm_struct *mm);
+ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+                                  struct mm_struct *mm, unsigned long start,
+                                  unsigned long length,
+                                  const struct mmu_interval_notifier_ops *ops);
 
-During the driver_ops->invalidate() callback the device driver must perform
-the update action to the range (mark range read only, or fully unmap,
-etc.). The device must complete the update before the driver callback returns.
+During the ops->invalidate() callback the device driver must perform the
+update action to the range (mark range read only, or fully unmap, etc.). The
+device must complete the update before the driver callback returns.
 
 When the device driver wants to populate a range of virtual addresses, it can
 use::
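The corrected signature above also passes the ops in as a parameter instead of requiring the caller to assign interval_sub->ops by hand first. A registration sketch under that assumption (driver_invalidate and driver_mirror_range are illustrative names):

    static bool driver_invalidate(struct mmu_interval_notifier *interval_sub,
                                  const struct mmu_notifier_range *range,
                                  unsigned long cur_seq);   /* driver-defined */

    static const struct mmu_interval_notifier_ops driver_ops = {
            .invalidate = driver_invalidate,
    };

    static int driver_mirror_range(struct mmu_interval_notifier *interval_sub,
                                   struct mm_struct *mm,
                                   unsigned long start, unsigned long length)
    {
            return mmu_interval_notifier_insert(interval_sub, mm, start,
                                                length, &driver_ops);
    }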
@@ -183,7 +183,7 @@ The usage pattern is::
       struct hmm_range range;
       ...
 
-      range.notifier = &mni;
+      range.notifier = &interval_sub;
       range.start = ...;
       range.end = ...;
       range.pfns = ...;
@@ -191,11 +191,11 @@ The usage pattern is::
       range.values = ...;
       range.pfn_shift = ...;
 
-      if (!mmget_not_zero(mni->notifier.mm))
+      if (!mmget_not_zero(interval_sub->notifier.mm))
           return -EFAULT;
 
 again:
-      range.notifier_seq = mmu_interval_read_begin(&mni);
+      range.notifier_seq = mmu_interval_read_begin(&interval_sub);
       down_read(&mm->mmap_sem);
       ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
       if (ret) {
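For reference, the fragment above sits inside a retry loop; the rest of the pattern, reconstructed from the hmm.rst documentation of this era (mmap_sem, HMM_RANGE_SNAPSHOT), with driver_lock and driver_update_device_tables as illustrative stand-ins:

    again:
            range.notifier_seq = mmu_interval_read_begin(&interval_sub);
            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
            if (ret) {
                    up_read(&mm->mmap_sem);
                    if (ret == -EBUSY)
                            goto again; /* fault raced with an invalidation */
                    return ret;
            }
            up_read(&mm->mmap_sem);

            mutex_lock(&driver_lock);
            if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
                    mutex_unlock(&driver_lock);
                    goto again;
            }
            /* No invalidation collided; commit the snapshot to the device. */
            driver_update_device_tables(&range);
            mutex_unlock(&driver_lock);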
include/linux/mm_types.h:

@@ -490,7 +490,7 @@ struct mm_struct {
                /* store ref to file /proc/<pid>/exe symlink points to */
                struct file __rcu *exe_file;
 #ifdef CONFIG_MMU_NOTIFIER
-               struct mmu_notifier_mm *mmu_notifier_mm;
+               struct mmu_notifier_subscriptions *notifier_subscriptions;
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
                pgtable_t pmd_huge_pte; /* protected by page_table_lock */
include/linux/mmu_notifier.h:

@@ -8,7 +8,7 @@
 #include <linux/srcu.h>
 #include <linux/interval_tree.h>
 
-struct mmu_notifier_mm;
+struct mmu_notifier_subscriptions;
 struct mmu_notifier;
 struct mmu_notifier_range;
 struct mmu_interval_notifier;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
         * through the gart alias address, so leading to memory
         * corruption.
         */
-       void (*release)(struct mmu_notifier *mn,
+       void (*release)(struct mmu_notifier *subscription,
                        struct mm_struct *mm);
 
        /*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
         * Start-end is necessary in case the secondary MMU is mapping the page
         * at a smaller granularity than the primary MMU.
         */
-       int (*clear_flush_young)(struct mmu_notifier *mn,
+       int (*clear_flush_young)(struct mmu_notifier *subscription,
                                 struct mm_struct *mm,
                                 unsigned long start,
                                 unsigned long end);
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
         * latter, it is supposed to test-and-clear the young/accessed bitflag
         * in the secondary pte, but it may omit flushing the secondary tlb.
         */
-       int (*clear_young)(struct mmu_notifier *mn,
+       int (*clear_young)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long start,
                           unsigned long end);
@@ -106,7 +106,7 @@ struct mmu_notifier_ops {
         * frequently used without actually clearing the flag or tearing
         * down the secondary mapping on the page.
         */
-       int (*test_young)(struct mmu_notifier *mn,
+       int (*test_young)(struct mmu_notifier *subscription,
                          struct mm_struct *mm,
                          unsigned long address);
 
@@ -114,7 +114,7 @@ struct mmu_notifier_ops {
         * change_pte is called in cases that pte mapping to page is changed:
         * for example, when ksm remaps pte to point to a new shared page.
         */
-       void (*change_pte)(struct mmu_notifier *mn,
+       void (*change_pte)(struct mmu_notifier *subscription,
                           struct mm_struct *mm,
                           unsigned long address,
                           pte_t pte);
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
         * invalidate_range_end.
         *
         */
-       int (*invalidate_range_start)(struct mmu_notifier *mn,
+       int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
-       void (*invalidate_range_end)(struct mmu_notifier *mn,
+       void (*invalidate_range_end)(struct mmu_notifier *subscription,
                                     const struct mmu_notifier_range *range);
 
        /*
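A sketch of how a driver might implement the renamed start callback, under the 5.6 contract that non-blockable (e.g. OOM-path) invalidations may be refused with -EAGAIN; struct my_mirror and my_unmap_device_range are illustrative, not from the tree:

    struct my_mirror {                      /* hypothetical driver state */
            struct mmu_notifier subscription;
            struct mutex lock;
    };

    static int my_invalidate_range_start(struct mmu_notifier *subscription,
                                         const struct mmu_notifier_range *range)
    {
            struct my_mirror *mirror =
                    container_of(subscription, struct my_mirror, subscription);

            /* Callers that cannot sleep must not be blocked on the mutex. */
            if (!mmu_notifier_range_blockable(range))
                    return -EAGAIN;

            mutex_lock(&mirror->lock);
            my_unmap_device_range(mirror, range->start, range->end);
            mutex_unlock(&mirror->lock);
            return 0;
    }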
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
         * of what was passed to invalidate_range_start()/end(), if
         * called between those functions.
         */
-       void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
-                                unsigned long start, unsigned long end);
+       void (*invalidate_range)(struct mmu_notifier *subscription,
+                                struct mm_struct *mm,
+                                unsigned long start,
+                                unsigned long end);
 
        /*
         * These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
         * and cannot sleep.
         */
        struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
-       void (*free_notifier)(struct mmu_notifier *mn);
+       void (*free_notifier)(struct mmu_notifier *subscription);
 };
 
 /*
@@ -235,7 +237,7 @@ struct mmu_notifier {
  * was required but mmu_notifier_range_blockable(range) is false.
  */
 struct mmu_interval_notifier_ops {
-       bool (*invalidate)(struct mmu_interval_notifier *mni,
+       bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
                           const struct mmu_notifier_range *range,
                           unsigned long cur_seq);
 };
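An invalidate() implementation consistent with the renamed signature might look like the sketch below. The sequence must be published under the same driver lock the read side takes, so a concurrent mmu_interval_read_retry() observes the collision; struct my_range_mirror and my_zap_device_range are illustrative:

    struct my_range_mirror {                /* hypothetical driver state */
            struct mmu_interval_notifier interval_sub;
            struct mutex lock;
    };

    static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
            struct my_range_mirror *m =
                    container_of(interval_sub, struct my_range_mirror,
                                 interval_sub);

            if (!mmu_notifier_range_blockable(range))
                    return false;   /* core retries from a sleepable context */

            mutex_lock(&m->lock);
            /* Record the sequence under the driver lock before touching
             * the device, so the read side sees the collision. */
            mmu_interval_set_seq(interval_sub, cur_seq);
            my_zap_device_range(m, range->start, range->end);
            mutex_unlock(&m->lock);
            return true;
    }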
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
 
 static inline int mm_has_notifiers(struct mm_struct *mm)
 {
-       return unlikely(mm->mmu_notifier_mm);
+       return unlikely(mm->notifier_subscriptions);
 }
 
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
        up_write(&mm->mmap_sem);
        return ret;
 }
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
 void mmu_notifier_synchronize(void);
 
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
                                 struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
                                   struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
                                    struct mm_struct *mm);
 
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long length,
                                 const struct mmu_interval_notifier_ops *ops);
 int mmu_interval_notifier_insert_locked(
-       struct mmu_interval_notifier *mni, struct mm_struct *mm,
+       struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
        unsigned long start, unsigned long length,
        const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
 
 /**
  * mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
  * @cur_seq - The cur_seq passed to the invalidate() callback
  *
  * This must be called unconditionally from the invalidate callback of a
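The get/put declarations above pair with the alloc_notifier()/free_notifier() ops: mmu_notifier_get() returns an existing subscription for the (ops, mm) pair or allocates a new one, and mmu_notifier_put() defers the free past an SRCU grace period. A usage sketch under those 5.6 semantics, reusing the hypothetical my_mirror from earlier:

    static struct my_mirror *my_mirror_get(struct mm_struct *mm)
    {
            struct mmu_notifier *subscription;

            /* Looks up an existing subscription with my_driver_ops on this
             * mm, or calls my_driver_ops.alloc_notifier() to make one. */
            subscription = mmu_notifier_get(&my_driver_ops, mm);
            if (IS_ERR(subscription))
                    return ERR_CAST(subscription);
            return container_of(subscription, struct my_mirror, subscription);
    }

    static void my_mirror_put(struct my_mirror *mirror)
    {
            /* free_notifier() runs only after all SRCU readers are done. */
            mmu_notifier_put(&mirror->subscription);
    }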
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
  * If the caller does not call mmu_interval_read_begin() or
  * mmu_interval_read_retry() then this call is not required.
  */
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
-                                       unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+                    unsigned long cur_seq)
 {
-       WRITE_ONCE(mni->invalidate_seq, cur_seq);
+       WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
 }
 
 /**
  * mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the paired mmu_interval_read_begin()
  *
  * This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
  * Returns true if an invalidation collided with this critical section, and
  * the caller should retry.
  */
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
-                                          unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+                       unsigned long seq)
 {
-       return mni->invalidate_seq != seq;
+       return interval_sub->invalidate_seq != seq;
 }
 
 /**
  * mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
  * seq: The return of the matching mmu_interval_read_begin()
  *
  * This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
  * This call can be used as part of loops and other expensive operations to
  * expedite a retry.
  */
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
-                                           unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+                        unsigned long seq)
 {
        /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
-       return READ_ONCE(mni->invalidate_seq) != seq;
+       return READ_ONCE(interval_sub->invalidate_seq) != seq;
 }
 
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                            unsigned long start,
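Taken together, the two helpers split the read side into a cheap unlocked poll and a final locked check. A sketch assuming a driver mutex as the "user provided lock"; program_one_device_pte and commit_device_page_table are illustrative:

            /* Mid-loop, abort early if an invalidation already collided;
             * the READ_ONCE inside mmu_interval_check_retry() needs no lock. */
            for (i = 0; i < npages; i++) {
                    if (mmu_interval_check_retry(&interval_sub, seq))
                            goto again;
                    program_one_device_pte(i);
            }

            /* The binding decision must be made under the same lock that the
             * invalidate() callback holds around mmu_interval_set_seq(). */
            mutex_lock(&driver_lock);
            if (mmu_interval_read_retry(&interval_sub, seq)) {
                    mutex_unlock(&driver_lock);
                    goto again;
            }
            commit_device_page_table();
            mutex_unlock(&driver_lock);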
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                __mmu_notifier_invalidate_range(mm, start, end);
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-       mm->mmu_notifier_mm = NULL;
+       mm->notifier_subscriptions = NULL;
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
        if (mm_has_notifiers(mm))
-               __mmu_notifier_mm_destroy(mm);
+               __mmu_notifier_subscriptions_destroy(mm);
 }
 
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 }
 
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
 }
 
|
|
@ -692,7 +692,7 @@ void __mmdrop(struct mm_struct *mm)
|
|||
WARN_ON_ONCE(mm == current->active_mm);
|
||||
mm_free_pgd(mm);
|
||||
destroy_context(mm);
|
||||
mmu_notifier_mm_destroy(mm);
|
||||
mmu_notifier_subscriptions_destroy(mm);
|
||||
check_mm(mm);
|
||||
put_user_ns(mm->user_ns);
|
||||
free_mm(mm);
|
||||
|
@@ -1025,7 +1025,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        RCU_INIT_POINTER(mm->exe_file, NULL);
-       mmu_notifier_mm_init(mm);
+       mmu_notifier_subscriptions_init(mm);
        init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
mm/debug.c:

@@ -153,7 +153,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
                "exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-               "mmu_notifier_mm %px\n"
+               "notifier_subscriptions %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
@@ -185,7 +185,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
                mm->exe_file,
 #ifdef CONFIG_MMU_NOTIFIER
-               mm->mmu_notifier_mm,
+               mm->notifier_subscriptions,
 #endif
 #ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
File diff suppressed because it is too large