drm/i915/gem: Avoid kmalloc under i915->mm_lock
Rearrange the allocation of the mm_struct registration to avoid allocating underneath the i915->mm_lock, so that we avoid tainting the lock (and, in turn, the many other locks that may be held as i915->mm_lock is taken, and those locks we may want on the free [shrinker] paths). In doing so, we convert the lookup to be RCU protected, courtesy of converting the free-worker to an rcu_work.

v2: Remember to use the hash_rcu variants to protect the list iteration from concurrent add/del.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200619194038.5088-1-chris@chris-wilson.co.uk
parent f6a7d3952c
commit 040e123c0b
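The shape of the fix, in miniature: a GFP_KERNEL allocation may enter direct reclaim, so lockdep treats any lock held across kmalloc() as a lock that reclaim (and hence the shrinker) may depend on. Allocating optimistically and taking the lock only to publish breaks that dependency. A sketch of the pattern, with hypothetical obj type, obj_lock, and obj_find()/obj_insert() helpers (not the driver code):

static struct obj *obj_get_or_create(u32 key)
{
	struct obj *o, *new;	/* obj, obj_lock, obj_find(), obj_insert() are hypothetical */

	/* Allocate before taking the lock; GFP_KERNEL may recurse
	 * into the shrinker via direct reclaim. */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	spin_lock(&obj_lock);
	o = obj_find(key);		/* recheck: did someone beat us? */
	if (!o) {
		obj_insert(new, key);	/* publish our copy */
		o = new;
	}
	spin_unlock(&obj_lock);

	if (o != new)
		kfree(new);		/* lost the race to a concurrent thread */
	return o;
}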
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -21,7 +21,7 @@ struct i915_mm_struct {
 	struct i915_mmu_notifier *mn;
 	struct hlist_node node;
 	struct kref kref;
-	struct work_struct work;
+	struct rcu_work work;
 };
 
 #if defined(CONFIG_MMU_NOTIFIER)
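An rcu_work is a work_struct whose handler is additionally deferred until an RCU grace period has elapsed, which is what lets the existing free-worker double as the RCU reclamation point. A minimal sketch of the API, using a hypothetical obj type (the i915 equivalent follows in the hunks below):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {				/* hypothetical type, for illustration */
	struct rcu_work work;		/* wraps a work_struct */
};

static void obj_free_worker(struct work_struct *work)
{
	/* rcu_work embeds a work_struct, hence the work.work step */
	struct obj *o = container_of(work, struct obj, work.work);

	kfree(o);
}

static void obj_schedule_free(struct obj *o)
{
	INIT_RCU_WORK(&o->work, obj_free_worker);
	queue_rcu_work(system_wq, &o->work);	/* runs after a grace period */
}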
@@ -189,40 +189,31 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 static struct i915_mmu_notifier *
 i915_mmu_notifier_find(struct i915_mm_struct *mm)
 {
-	struct i915_mmu_notifier *mn;
-	int err = 0;
+	struct i915_mmu_notifier *mn, *old;
+	int err;
 
-	mn = mm->mn;
-	if (mn)
+	mn = READ_ONCE(mm->mn);
+	if (likely(mn))
 		return mn;
 
 	mn = i915_mmu_notifier_create(mm);
 	if (IS_ERR(mn))
-		err = PTR_ERR(mn);
+		return mn;
 
-	mmap_write_lock(mm->mm);
-	mutex_lock(&mm->i915->mm_lock);
-	if (mm->mn == NULL && !err) {
-		/* Protected by mmap_lock (write-lock) */
-		err = __mmu_notifier_register(&mn->mn, mm->mm);
-		if (!err) {
-			/* Protected by mm_lock */
-			mm->mn = fetch_and_zero(&mn);
-		}
-	} else if (mm->mn) {
-		/*
-		 * Someone else raced and successfully installed the mmu
-		 * notifier, we can cancel our own errors.
-		 */
-		err = 0;
-	}
-	mutex_unlock(&mm->i915->mm_lock);
-	mmap_write_unlock(mm->mm);
-
-	if (mn && !IS_ERR(mn))
+	err = mmu_notifier_register(&mn->mn, mm->mm);
+	if (err) {
 		kfree(mn);
+		return ERR_PTR(err);
+	}
 
-	return err ? ERR_PTR(err) : mm->mn;
+	old = cmpxchg(&mm->mn, NULL, mn);
+	if (old) {
+		mmu_notifier_unregister(&mn->mn, mm->mm);
+		kfree(mn);
+		mn = old;
+	}
+
+	return mn;
 }
 
 static int
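The rewritten i915_mmu_notifier_find() swaps the mutex for a once-only publish: whoever wins the cmpxchg() installs their notifier, and the loser unregisters and frees its copy, adopting the winner's. The fast path pairs with this via READ_ONCE(mm->mn), so a reader observes either NULL or the installed pointer. The idiom in isolation, with a hypothetical foo type and foo_create()/foo_destroy() helpers:

static struct foo *foo_get_or_install(struct foo **slot)
{
	struct foo *new, *old;	/* foo, foo_create(), foo_destroy() are hypothetical */

	new = foo_create();
	if (IS_ERR(new))
		return new;

	old = cmpxchg(slot, NULL, new);	/* atomically: NULL -> new */
	if (old) {
		foo_destroy(new);	/* raced; adopt the winner's object */
		new = old;
	}
	return new;
}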
@@ -301,23 +292,28 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
 #endif
 
 static struct i915_mm_struct *
-__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
+__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
 {
-	struct i915_mm_struct *mm;
+	struct i915_mm_struct *it, *mm = NULL;
 
-	/* Protected by dev_priv->mm_lock */
-	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
-		if (mm->mm == real)
-			return mm;
+	rcu_read_lock();
+	hash_for_each_possible_rcu(i915->mm_structs,
+				   it, node,
+				   (unsigned long)real)
+		if (it->mm == real && kref_get_unless_zero(&it->kref)) {
+			mm = it;
+			break;
+		}
+	rcu_read_unlock();
 
-	return NULL;
+	return mm;
 }
 
 static int
 i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_mm_struct *mm;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_mm_struct *mm, *new;
 	int ret = 0;
 
 	/* During release of the GEM object we hold the struct_mutex. This
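Moving the lookup from the mutex to rcu_read_lock() changes the lifetime rules: RCU guarantees the entry's memory is not freed while the reader runs, but the entry may already be dying, with its refcount at zero and its free queued. That is why the loop uses kref_get_unless_zero() rather than kref_get(), which would resurrect a dying object. The race it closes, sketched as an illustrative timeline:

/*
 * CPU0: lookup                        CPU1: final kref_put()
 * ------------------------------      ------------------------------
 * rcu_read_lock();
 * it = <hash walk finds entry>;       refcount 1 -> 0, release runs:
 *                                       hash_del_rcu(&it->node);
 * kref_get_unless_zero(&it->kref)       queue_rcu_work(...);
 *   fails (count is zero), so the
 *   entry is treated as absent and
 *   the caller allocates a new one.
 * rcu_read_unlock();                  grace period ends; worker kfree()s it
 */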
@@ -330,39 +326,42 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 	 * struct_mutex, i.e. we need to schedule a worker to do the clean
 	 * up.
 	 */
-	mutex_lock(&dev_priv->mm_lock);
-	mm = __i915_mm_struct_find(dev_priv, current->mm);
-	if (mm == NULL) {
-		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
-		if (mm == NULL) {
-			ret = -ENOMEM;
-			goto out;
-		}
+	mm = __i915_mm_struct_find(i915, current->mm);
+	if (mm)
+		goto out;
 
-		kref_init(&mm->kref);
-		mm->i915 = to_i915(obj->base.dev);
+	new = kmalloc(sizeof(*mm), GFP_KERNEL);
+	if (!new)
+		return -ENOMEM;
 
-		mm->mm = current->mm;
+	kref_init(&new->kref);
+	new->i915 = to_i915(obj->base.dev);
+	new->mm = current->mm;
+	new->mn = NULL;
+
+	spin_lock(&i915->mm_lock);
+	mm = __i915_mm_struct_find(i915, current->mm);
+	if (!mm) {
+		hash_add_rcu(i915->mm_structs,
+			     &new->node,
+			     (unsigned long)new->mm);
 		mmgrab(current->mm);
+		mm = new;
+	}
+	spin_unlock(&i915->mm_lock);
+	if (mm != new)
+		kfree(new);
 
-		mm->mn = NULL;
-
-		/* Protected by dev_priv->mm_lock */
-		hash_add(dev_priv->mm_structs,
-			 &mm->node, (unsigned long)mm->mm);
-	} else
-		kref_get(&mm->kref);
-
-	obj->userptr.mm = mm;
 out:
-	mutex_unlock(&dev_priv->mm_lock);
+	obj->userptr.mm = mm;
 	return ret;
 }
 
 static void
 __i915_mm_struct_free__worker(struct work_struct *work)
 {
-	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
+	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);
 
 	i915_mmu_notifier_free(mm->mn, mm->mm);
 	mmdrop(mm->mm);
 	kfree(mm);
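The unlocked lookup, allocation, and locked re-lookup make the race between two registering threads benign: both may miss and allocate, but only one insert wins under the spinlock, and the loser frees its still-unpublished copy. Note that the locked __i915_mm_struct_find() already takes a reference via kref_get_unless_zero(), so neither the winner (whose reference comes from kref_init()) nor the loser needs any extra ref manipulation. An illustrative interleaving:

/*
 * CPU0                                 CPU1
 * find() -> NULL                       find() -> NULL
 * new0 = kmalloc(...)                  new1 = kmalloc(...)
 * spin_lock(); find() -> NULL;
 * hash_add_rcu(new0); mm = new0;
 * spin_unlock();
 *                                      spin_lock(); find() -> new0 (+ref);
 *                                      spin_unlock();
 *                                      kfree(new1);   <- mm != new, the loser
 */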
@@ -373,12 +372,12 @@ __i915_mm_struct_free(struct kref *kref)
 {
 	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
 
-	/* Protected by dev_priv->mm_lock */
-	hash_del(&mm->node);
-	mutex_unlock(&mm->i915->mm_lock);
+	spin_lock(&mm->i915->mm_lock);
+	hash_del_rcu(&mm->node);
+	spin_unlock(&mm->i915->mm_lock);
 
-	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
-	queue_work(mm->i915->mm.userptr_wq, &mm->work);
+	INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);
+	queue_rcu_work(system_wq, &mm->work);
 }
 
 static void
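This is where the rcu_work earns its keep: once hash_del_rcu() runs under the spinlock, no new lookup can find the entry, and the grace period implied by queue_rcu_work() guarantees that every lookup already inside rcu_read_lock() has finished (having either taken a reference, which would have prevented the final put, or failed kref_get_unless_zero()) before the worker may free it. A hypothetical blocking variant of the same ordering, for illustration only; the driver defers to a worker instead, which it needed anyway to keep the mm release out of this context (see the comment in i915_gem_userptr_init__mm_struct()):

static void mm_struct_free_sync(struct i915_mm_struct *mm)
{
	spin_lock(&mm->i915->mm_lock);
	hash_del_rcu(&mm->node);	/* new lookups can no longer find mm */
	spin_unlock(&mm->i915->mm_lock);

	synchronize_rcu();	/* drain rcu_read_lock() sections in flight */
	kfree(mm);		/* safe: no lookup can still dereference mm */
}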
@@ -387,9 +386,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
 	if (obj->userptr.mm == NULL)
 		return;
 
-	kref_put_mutex(&obj->userptr.mm->kref,
-		       __i915_mm_struct_free,
-		       &to_i915(obj->base.dev)->mm_lock);
+	kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free);
 	obj->userptr.mm = NULL;
 }
 
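kref_put_mutex() was load-bearing in the old scheme: it acquires the mutex before the refcount can be observed at zero, so a lookup serialized on the same mutex could never see a dead entry, and its release callback runs with the lock held and must drop it, which is why the old __i915_mm_struct_free() ended in mutex_unlock(). With kref_get_unless_zero() guarding the lookup, a zero-count entry is simply skipped, so a plain kref_put() suffices. Approximately (a sketch of the contract, not the kernel source):

/*
 *	if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
 *		release(kref);		release runs with the mutex held
 *		return 1;		and must unlock it itself
 *	}
 *	return 0;
 */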
@@ -851,7 +848,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
-	mutex_init(&dev_priv->mm_lock);
+	spin_lock_init(&dev_priv->mm_lock);
 	hash_init(dev_priv->mm_structs);
 
 	dev_priv->mm.userptr_wq =
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -993,7 +993,7 @@ struct drm_i915_private {
 
 	struct i915_gem_mm mm;
 	DECLARE_HASHTABLE(mm_structs, 7);
-	struct mutex mm_lock;
+	spinlock_t mm_lock;
 
 	/* Kernel Modesetting */
 