mm/mmu_notifiers: annotate with might_sleep()
Since mmu notifiers don't exist for many processes, but could block in interesting places, add some annotations. This should help make sure the core mm keeps up its end of the mmu notifier contract. The checks here are outside of all notifier checks because of that. They compile away without CONFIG_DEBUG_ATOMIC_SLEEP.

Link: https://lore.kernel.org/r/20190826201425.17547-6-daniel.vetter@ffwll.ch
Suggested-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
66204f1d2d
commit
810e24e009
|
@@ -343,6 +343,8 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
|
|||
static inline void
|
||||
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
|
||||
{
|
||||
might_sleep();
|
||||
|
||||
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
|
||||
if (mm_has_notifiers(range->mm)) {
|
||||
range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
|
||||
|
@@ -368,6 +370,9 @@ mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
|
|||
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	/*
	 * Debug annotation: a blockable invalidate_range_end may sleep
	 * inside the notifier callbacks, so complain when called from
	 * atomic context even if no notifier is registered for this mm.
	 * Compiles away without CONFIG_DEBUG_ATOMIC_SLEEP (see commit
	 * message above).
	 */
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	/* Dispatch to registered notifiers only if this mm has any. */
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}
|
||||
|
|
Loading…
Reference in New Issue