drm: verify vma access in TTM+GEM drivers
GEM already does a good job of tracking access to GEM buffers via handles and drm_vma access management. However, TTM drivers currently do not verify this during mmap(). TTM provides the verify_access() callback to test this, so fix all drivers to actually call into GEM+VMA to verify access instead of always returning 0.

All drivers assume that user-space can only get access to TTM buffers via GEM handles, so whenever the verify_access() callback is called from ttm_bo_mmap(), the buffer must have a valid embedded GEM object. This holds for all TTM+GEM drivers, which is why this patch does not touch pure TTM drivers (i.e., vmwgfx).

v2: Switch to drm_vma_node_verify_access() to correctly return -EACCES if access was denied.

Cc: Dave Airlie <airlied@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit acb4652703
parent ca481c9b2a
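For reference, the change applied to each driver below follows one pattern, shown in the minimal sketch here. The foo_bo type, the foo_bo() helper, and the foo_bo_verify_access() name are hypothetical placeholders for a TTM+GEM driver's buffer wrapper (most drivers embed the GEM object; nouveau holds a pointer instead); only drm_vma_node_verify_access() and the shape of TTM's verify_access() callback come from the patch itself.

/*
 * Sketch only: "foo_bo" and "foo_bo()" are made-up names standing in for a
 * driver's TTM buffer wrapper; they are not part of this patch.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/ttm/ttm_bo_api.h>

struct foo_bo {
	struct ttm_buffer_object bo;	/* TTM object that user-space mmap()s */
	struct drm_gem_object gem;	/* embedded GEM object carrying the vma_node */
};

static inline struct foo_bo *foo_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct foo_bo, bo);
}

/*
 * Called from ttm_bo_mmap(): instead of unconditionally returning 0, check
 * the drm_vma access list on the embedded GEM object's vma offset node.
 * drm_vma_node_verify_access() returns 0 if filp was granted access to the
 * buffer (i.e. holds a GEM handle) and -EACCES otherwise.
 */
static int foo_bo_verify_access(struct ttm_buffer_object *bo,
				struct file *filp)
{
	struct foo_bo *foobo = foo_bo(bo);

	return drm_vma_node_verify_access(&foobo->gem.vma_node, filp);
}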
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct ast_bo *astbo = ast_bo(bo);
+
+	return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
 }
 
 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+	return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
 }
 
 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+	return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
 }
 
 static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -1260,7 +1260,9 @@ out:
 static int
 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
 }
 
 static int
@@ -212,7 +212,9 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
 
 static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct qxl_bo *qbo = to_qxl_bo(bo);
+
+	return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
 }
 
 static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	return 0;
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
+	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,