drm/msm: Pass the MMU domain index in struct msm_file_private

Pass the index of the MMU domain in struct msm_file_private instead
of assuming gpu->id throughout the submit path. This clears the way
to change ctx->aspace to a per-instance pagetable.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>

Author:    Jordan Crouse, 2019-05-07 12:02:07 -06:00
Committer: Rob Clark
Parent:    bdad5c53e1
Commit:    295b22ae59
5 changed files, 14 insertions(+), 8 deletions(-)
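
For orientation before the per-file diffs: a minimal, self-contained C sketch of the plumbing pattern this patch introduces. The address space is recorded once in the per-file context and copied into each submit, so the later pin/unpin paths no longer reach through gpu->aspace. Every struct and helper below is a simplified stand-in for illustration, not the real msm API.

/*
 * Simplified userspace sketch (NOT the real msm driver code): shows how
 * an address space recorded in the file-private context at open time is
 * handed to each submit, so pin/unpin no longer dereference gpu->aspace.
 */
#include <stdio.h>
#include <stdlib.h>

struct aspace { const char *name; };         /* stand-in for msm_gem_address_space */
struct gpu { struct aspace *aspace; };       /* stand-in for msm_gpu */
struct file_ctx { struct aspace *aspace; };  /* stand-in for msm_file_private */

struct submit {                              /* stand-in for msm_gem_submit */
	struct gpu *gpu;
	struct aspace *aspace;
};

/* context_init(): record the aspace this file will use for all its submits */
static void context_init(struct file_ctx *ctx, struct gpu *gpu)
{
	ctx->aspace = gpu->aspace;  /* later: point this at a per-instance pagetable */
}

/* submit_create(): the aspace arrives as an argument instead of being
 * looked up through the gpu */
static struct submit *submit_create(struct gpu *gpu, struct aspace *aspace)
{
	struct submit *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->gpu = gpu;
	s->aspace = aspace;
	return s;
}

/* pin/retire paths consult submit->aspace only */
static void pin_bo(struct submit *s)
{
	printf("pin buffer into \"%s\"\n", s->aspace->name);
}

int main(void)
{
	struct aspace global = { .name = "gpu-global" };
	struct gpu gpu = { .aspace = &global };
	struct file_ctx ctx;
	struct submit *s;

	context_init(&ctx, &gpu);
	s = submit_create(&gpu, ctx.aspace);  /* mirrors msm_ioctl_gem_submit() */
	if (!s)
		return 1;
	pin_bo(s);
	free(s);
	return 0;
}

The design point: submit_create() receives the aspace from its caller (the submit ioctl passes ctx->aspace), so switching to a per-instance pagetable later only requires changing what context_init() stores.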

drivers/gpu/drm/msm/msm_drv.c

@@ -611,6 +611,7 @@ static void load_gpu(struct drm_device *dev)
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx;
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -619,6 +620,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
 	msm_submitqueue_init(dev, ctx);
+	ctx->aspace = priv->gpu->aspace;
 	file->driver_priv = ctx;
 	return 0;

drivers/gpu/drm/msm/msm_drv.h

@@ -68,6 +68,7 @@ struct msm_file_private {
 	rwlock_t queuelock;
 	struct list_head submitqueues;
 	int queueid;
+	struct msm_gem_address_space *aspace;
 };
 enum msm_mdp_plane_property {

drivers/gpu/drm/msm/msm_gem.h

@@ -141,6 +141,7 @@ void msm_gem_free_work(struct work_struct *work);
 struct msm_gem_submit {
 	struct drm_device *dev;
 	struct msm_gpu *gpu;
+	struct msm_gem_address_space *aspace;
 	struct list_head node;   /* node in ring submit list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;

drivers/gpu/drm/msm/msm_gem_submit.c

@@ -32,8 +32,9 @@
 #define BO_PINNED 0x2000
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
-		uint32_t nr_bos, uint32_t nr_cmds)
+		struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
+		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
+		uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
 	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
@@ -47,6 +48,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		return NULL;
 	submit->dev = dev;
+	submit->aspace = aspace;
 	submit->gpu = gpu;
 	submit->fence = NULL;
 	submit->cmd = (void *)&submit->bos[nr_bos];
@@ -160,7 +162,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->base.resv->lock);
@@ -264,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 		/* if locking succeeded, pin bo: */
 		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
-				submit->gpu->aspace, &iova);
+				submit->aspace, &iova);
 		if (ret)
 			break;
@@ -477,7 +479,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		}
 	}
-	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
+			args->nr_cmds);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto out_unlock;

drivers/gpu/drm/msm/msm_gpu.c

@@ -684,7 +684,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		/* move to inactive: */
 		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
 		drm_gem_object_put(&msm_obj->base);
 	}
@@ -768,8 +768,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_get(&msm_obj->base);
-		msm_gem_get_and_pin_iova(&msm_obj->base,
-				submit->gpu->aspace, &iova);
+		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);