drm/amdgpu: use amdgpu_bo_user bo for metadata and tiling flag
Tiling flags and metadata are only needed for BOs created by amdgpu_gem_object_create(), so we can remove those from the base class. v2: * squash tiling_flags and metadata related patches into one * use BUG_ON for non ttm_bo_type_device type when accessing tiling_flags and metadata. v3: * include to_amdgpu_bo_user Signed-off-by: Nirmoy Das <nirmoy.das@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
22b40f7a3a
commit
cc1bcf85b0
|
@ -499,8 +499,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
|
|||
*dma_buf_kgd = (struct kgd_dev *)adev;
|
||||
if (bo_size)
|
||||
*bo_size = amdgpu_bo_size(bo);
|
||||
if (metadata_size)
|
||||
*metadata_size = bo->metadata_size;
|
||||
if (metadata_buffer)
|
||||
r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
|
||||
metadata_size, &metadata_flags);
|
||||
|
|
|
@ -77,6 +77,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
|
|||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
|
||||
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
|
||||
struct amdgpu_bo_user *ubo;
|
||||
|
||||
if (bo->tbo.pin_count > 0)
|
||||
amdgpu_bo_subtract_pin_size(bo);
|
||||
|
@ -94,7 +95,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
|
|||
}
|
||||
amdgpu_bo_unref(&bo->parent);
|
||||
|
||||
kfree(bo->metadata);
|
||||
if (bo->tbo.type == ttm_bo_type_device) {
|
||||
ubo = to_amdgpu_bo_user(bo);
|
||||
kfree(ubo->metadata);
|
||||
}
|
||||
|
||||
kfree(bo);
|
||||
}
|
||||
|
||||
|
@ -1157,12 +1162,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
|
|||
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
struct amdgpu_bo_user *ubo;
|
||||
|
||||
BUG_ON(bo->tbo.type != ttm_bo_type_device);
|
||||
if (adev->family <= AMDGPU_FAMILY_CZ &&
|
||||
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
|
||||
return -EINVAL;
|
||||
|
||||
bo->tiling_flags = tiling_flags;
|
||||
ubo = to_amdgpu_bo_user(bo);
|
||||
ubo->tiling_flags = tiling_flags;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1176,10 +1184,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
|
|||
*/
|
||||
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
|
||||
{
|
||||
struct amdgpu_bo_user *ubo;
|
||||
|
||||
BUG_ON(bo->tbo.type != ttm_bo_type_device);
|
||||
dma_resv_assert_held(bo->tbo.base.resv);
|
||||
ubo = to_amdgpu_bo_user(bo);
|
||||
|
||||
if (tiling_flags)
|
||||
*tiling_flags = bo->tiling_flags;
|
||||
*tiling_flags = ubo->tiling_flags;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1198,13 +1210,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
|
|||
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
|
||||
uint32_t metadata_size, uint64_t flags)
|
||||
{
|
||||
struct amdgpu_bo_user *ubo;
|
||||
void *buffer;
|
||||
|
||||
BUG_ON(bo->tbo.type != ttm_bo_type_device);
|
||||
ubo = to_amdgpu_bo_user(bo);
|
||||
if (!metadata_size) {
|
||||
if (bo->metadata_size) {
|
||||
kfree(bo->metadata);
|
||||
bo->metadata = NULL;
|
||||
bo->metadata_size = 0;
|
||||
if (ubo->metadata_size) {
|
||||
kfree(ubo->metadata);
|
||||
ubo->metadata = NULL;
|
||||
ubo->metadata_size = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1216,10 +1231,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
|
|||
if (buffer == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
kfree(bo->metadata);
|
||||
bo->metadata_flags = flags;
|
||||
bo->metadata = buffer;
|
||||
bo->metadata_size = metadata_size;
|
||||
kfree(ubo->metadata);
|
||||
ubo->metadata_flags = flags;
|
||||
ubo->metadata = buffer;
|
||||
ubo->metadata_size = metadata_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1243,21 +1258,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
|
|||
size_t buffer_size, uint32_t *metadata_size,
|
||||
uint64_t *flags)
|
||||
{
|
||||
struct amdgpu_bo_user *ubo;
|
||||
|
||||
if (!buffer && !metadata_size)
|
||||
return -EINVAL;
|
||||
|
||||
BUG_ON(bo->tbo.type != ttm_bo_type_device);
|
||||
ubo = to_amdgpu_bo_user(bo);
|
||||
if (buffer) {
|
||||
if (buffer_size < bo->metadata_size)
|
||||
if (buffer_size < ubo->metadata_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (bo->metadata_size)
|
||||
memcpy(buffer, bo->metadata, bo->metadata_size);
|
||||
if (ubo->metadata_size)
|
||||
memcpy(buffer, ubo->metadata, ubo->metadata_size);
|
||||
}
|
||||
|
||||
if (metadata_size)
|
||||
*metadata_size = bo->metadata_size;
|
||||
*metadata_size = ubo->metadata_size;
|
||||
if (flags)
|
||||
*flags = bo->metadata_flags;
|
||||
*flags = ubo->metadata_flags;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -92,10 +92,6 @@ struct amdgpu_bo {
|
|||
struct ttm_buffer_object tbo;
|
||||
struct ttm_bo_kmap_obj kmap;
|
||||
u64 flags;
|
||||
u64 tiling_flags;
|
||||
u64 metadata_flags;
|
||||
void *metadata;
|
||||
u32 metadata_size;
|
||||
unsigned prime_shared_count;
|
||||
/* per VM structure for page tables and with virtual addresses */
|
||||
struct amdgpu_vm_bo_base *vm_bo;
|
||||
|
|
Loading…
Reference in New Issue