drm/i915: Move vma vfuncs to address_space

They change with the address space and not with each vma, so move them
into the right pile of vfuncs. This saves 2 pointers per vma and
clarifies the code.

Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parent c7e16f22e8
commit 777dc5bb26
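In essence, the bind/unbind hooks leave struct i915_vma and become part of the shared struct i915_address_space vtable, so every vma reaches them through its vm pointer. Below is a minimal C sketch of that idea, with simplified stand-in types; the example_unbind() helper and the plain int/unsigned parameter types are illustrative only, the real declarations are in the hunks that follow.

```c
struct i915_vma;

/* After the patch: the per-address-space vtable owns the hooks. */
struct i915_address_space {
        /* ... clear_range, insert_entries, cleanup, ... */
        void (*unbind_vma)(struct i915_vma *vma);
        void (*bind_vma)(struct i915_vma *vma, int cache_level,
                         unsigned int flags);
};

/* The vma itself only keeps a pointer to its address space. */
struct i915_vma {
        struct i915_address_space *vm;
        /* bind_vma/unbind_vma are no longer duplicated here */
};

/* Callers now indirect through the address space. */
static void example_unbind(struct i915_vma *vma)
{
        vma->vm->unbind_vma(vma);       /* was: vma->unbind_vma(vma) */
}
```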
@@ -3067,7 +3067,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	trace_i915_vma_unbind(vma);
 
-	vma->unbind_vma(vma);
+	vma->vm->unbind_vma(vma);
 
 	list_del_init(&vma->mm_list);
 	if (i915_is_ggtt(vma->vm)) {
@@ -995,6 +995,8 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
 
 	ppgtt->switch_mm = gen8_mm_switch;
 
@@ -1579,6 +1581,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	ppgtt->base.allocate_va_range = aliasing ? NULL : gen6_alloc_va_range;
 	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+	ppgtt->base.bind_vma = ppgtt_bind_vma;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
@@ -2573,6 +2577,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
@@ -2613,6 +2619,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
 	return ret;
 }
@@ -2645,6 +2653,8 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
 	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.base.bind_vma = i915_ggtt_bind_vma;
+	dev_priv->gtt.base.unbind_vma = i915_ggtt_unbind_vma;
 
 	if (unlikely(dev_priv->gtt.do_idle_maps))
 		DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -2732,22 +2742,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 
-	if (INTEL_INFO(vm->dev)->gen >= 6) {
-		if (i915_is_ggtt(vm)) {
-			vma->ggtt_view = *ggtt_view;
-
-			vma->unbind_vma = ggtt_unbind_vma;
-			vma->bind_vma = ggtt_bind_vma;
-		} else {
-			vma->unbind_vma = ppgtt_unbind_vma;
-			vma->bind_vma = ppgtt_bind_vma;
-		}
-	} else {
-		BUG_ON(!i915_is_ggtt(vm));
-		if (i915_is_ggtt(vm))
-			vma->ggtt_view = *ggtt_view;
-		vma->unbind_vma = i915_ggtt_unbind_vma;
-		vma->bind_vma = i915_ggtt_bind_vma;
-	}
+	if (i915_is_ggtt(vm))
+		vma->ggtt_view = *ggtt_view;
 
 	list_add_tail(&vma->vma_link, &obj->vma_list);
 	if (!i915_is_ggtt(vm))
@@ -2957,7 +2953,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		return ret;
 	}
 
-	vma->bind_vma(vma, cache_level, flags);
+	vma->vm->bind_vma(vma, cache_level, flags);
 
 	return 0;
 }
@@ -196,14 +196,6 @@ struct i915_vma {
 	 * bits with absolutely no headroom. So use 4 bits. */
 	unsigned int pin_count:4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-	void (*bind_vma)(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags);
 };
 
 struct i915_page_table {
@@ -281,6 +273,13 @@ struct i915_address_space {
 			       uint64_t start,
 			       enum i915_cache_level cache_level, u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+	void (*bind_vma)(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags);
 };
 
 /* The Graphics Translation Table is the way in which GEN hardware translates a