drm/ttm: make ttm_range_man_init/takedown take type + args
This makes it easier to move these to a driver allocated system.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-47-airlied@gmail.com
commit 37205891d8
parent 0af135b892
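In short, the prototype change (summarised here from the ttm header hunk at the end of the diff; nothing beyond what the patch itself declares):

    /* before */
    int ttm_range_man_init(struct ttm_bo_device *bdev,
                           struct ttm_mem_type_manager *man,
                           unsigned long p_size);
    int ttm_range_man_fini(struct ttm_bo_device *bdev,
                           struct ttm_mem_type_manager *man);

    /* after */
    int ttm_range_man_init(struct ttm_bo_device *bdev,
                           unsigned type,
                           uint32_t available_caching,
                           uint32_t default_caching,
                           bool use_tt,
                           unsigned long p_size);
    int ttm_range_man_fini(struct ttm_bo_device *bdev,
                           unsigned type);

Callers no longer look up and pre-populate the struct ttm_mem_type_manager themselves; the range manager resolves it internally via ttm_manager_type(bdev, type) and applies the caching and use_tt settings passed in.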
@@ -66,12 +66,9 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				    unsigned int type,
 				    uint64_t size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, type);
-
-	man->available_caching = TTM_PL_FLAG_UNCACHED;
-	man->default_caching = TTM_PL_FLAG_UNCACHED;
-
-	return ttm_range_man_init(&adev->mman.bdev, man, size >> PAGE_SHIFT);
+	return ttm_range_man_init(&adev->mman.bdev, type,
+				  TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
+				  false, size >> PAGE_SHIFT);
 }
 
 /**
@@ -1996,9 +1993,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 
 	amdgpu_vram_mgr_fini(adev);
 	amdgpu_gtt_mgr_fini(adev);
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GDS));
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GWS));
-	ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_OA));
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
@@ -1103,7 +1103,6 @@ EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
 static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 			    uint64_t vram_base, size_t vram_size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
 	int ret;
 
 	vmm->vram_base = vram_base;
@@ -1116,9 +1115,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-	ret = ttm_range_man_init(&vmm->bdev, man, vram_size >> PAGE_SHIFT);
+	ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
+				 TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+				 TTM_PL_FLAG_WC, false,
+				 vram_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -1127,7 +1127,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 
 static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
 {
-	ttm_range_man_fini(&vmm->bdev, ttm_manager_type(&vmm->bdev, TTM_PL_VRAM));
+	ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
 	ttm_bo_device_release(&vmm->bdev);
 }
 
@@ -156,16 +156,17 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 static int
 nouveau_ttm_init_vram(struct nouveau_drm *drm)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
 	struct nvif_mmu *mmu = &drm->client.mmu;
 
-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+
 		/* Some BARs do not support being ioremapped WC */
 		const u8 type = mmu->type[drm->ttm.type_vram].type;
 
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+
 		if (type & NVIF_MEM_UNCACHED) {
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
@@ -178,7 +179,9 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm)
 		ttm_mem_type_manager_set_used(man, true);
 		return 0;
 	} else {
-		return ttm_range_man_init(&drm->ttm.bdev, man,
+		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM,
+					  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+					  TTM_PL_FLAG_WC, false,
 					  drm->gem.vram_available >> PAGE_SHIFT);
 	}
 }
@@ -193,7 +196,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
 		ttm_mem_type_manager_cleanup(man);
 	} else
-		ttm_range_man_fini(&drm->ttm.bdev, man);
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
 }
 
 static int
@@ -216,9 +219,10 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm)
 	else if (!drm->agp.bridge)
 		man->func = &nv04_gart_manager;
 	else
-		return ttm_range_man_init(&drm->ttm.bdev, man,
+		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT,
+					  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+					  TTM_PL_FLAG_WC, true,
 					  size_pages);
-
 	ttm_mem_type_manager_init(&drm->ttm.bdev, man,
 				  size_pages);
 	ttm_mem_type_manager_set_used(man, true);
@@ -232,7 +236,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
 
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
 	    drm->agp.bridge)
-		ttm_range_man_fini(&drm->ttm.bdev, man);
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
 	else {
 		ttm_mem_type_manager_disable(man);
 		ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man);
@@ -220,12 +220,8 @@ static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
 				 unsigned int type,
 				 uint64_t size)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&qdev->mman.bdev, type);
-
-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-
-	return ttm_range_man_init(&qdev->mman.bdev, man, size);
+	return ttm_range_man_init(&qdev->mman.bdev, type, TTM_PL_MASK_CACHING,
+				  TTM_PL_FLAG_CACHED, false, size);
 }
 
 int qxl_ttm_init(struct qxl_device *qdev)
@@ -267,8 +263,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
 
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
-	ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM));
-	ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV));
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
 	ttm_bo_device_release(&qdev->mman.bdev);
 	DRM_INFO("qxl: ttm finalized\n");
 }
@@ -68,35 +68,34 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 
 static int radeon_ttm_init_vram(struct radeon_device *rdev)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
-
-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
-	return ttm_range_man_init(&rdev->mman.bdev, man,
+	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
+				  TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
+				  TTM_PL_FLAG_WC, false,
 				  rdev->mc.real_vram_size >> PAGE_SHIFT);
 }
 
 static int radeon_ttm_init_gtt(struct radeon_device *rdev)
 {
-	struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
+	uint32_t available_caching, default_caching;
+
+	available_caching = TTM_PL_MASK_CACHING;
+	default_caching = TTM_PL_FLAG_CACHED;
 
-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-	man->use_tt = true;
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
 		if (!rdev->ddev->agp) {
 			DRM_ERROR("AGP is not enabled\n");
 			return -EINVAL;
 		}
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
+		available_caching = TTM_PL_FLAG_UNCACHED |
+				    TTM_PL_FLAG_WC;
+		default_caching = TTM_PL_FLAG_WC;
 	}
 #endif
 
-	return ttm_range_man_init(&rdev->mman.bdev, man,
+	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
+				  available_caching,
+				  default_caching, true,
 				  rdev->mc.gtt_size >> PAGE_SHIFT);
 }
 
@@ -827,8 +826,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 		}
 		radeon_bo_unref(&rdev->stolen_vga_memory);
 	}
-	ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM));
-	ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT));
+	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
+	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
 	ttm_bo_device_release(&rdev->mman.bdev);
 	radeon_gart_fini(rdev);
 	rdev->mman.initialized = false;
@@ -107,19 +107,27 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static const struct ttm_mem_type_manager_func ttm_bo_manager_func;
 
 int ttm_range_man_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man,
+		       unsigned type,
+		       uint32_t available_caching,
+		       uint32_t default_caching,
+		       bool use_tt,
 		       unsigned long p_size)
 {
+	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type);
 	struct ttm_range_manager *rman;
 
-	man->func = &ttm_bo_manager_func;
-
-	ttm_mem_type_manager_init(bdev, man, p_size);
+	man->available_caching = available_caching;
+	man->default_caching = default_caching;
+	man->use_tt = use_tt;
 
 	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
 	if (!rman)
 		return -ENOMEM;
 
+	man->func = &ttm_bo_manager_func;
+
+	ttm_mem_type_manager_init(bdev, man, p_size);
+
 	drm_mm_init(&rman->mm, 0, p_size);
 	spin_lock_init(&rman->lock);
 	man->priv = rman;
@@ -130,8 +138,9 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_range_man_init);
 
 int ttm_range_man_fini(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man)
+		       unsigned type)
 {
+	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type);
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
 	int ret;
@@ -626,13 +626,9 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ret = vmw_thp_init(dev_priv);
 #else
-	struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
-
-	man->available_caching = TTM_PL_FLAG_CACHED;
-	man->default_caching = TTM_PL_FLAG_CACHED;
-
-	ret = ttm_range_man_init(&dev_priv->bdev, man,
-				 dev_priv->vram_size >> PAGE_SHIFT);
+	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM,
+				 TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED,
+				 false, dev_priv->vram_size >> PAGE_SHIFT);
 #endif
 	ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false;
 	return ret;
@@ -643,8 +639,7 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	vmw_thp_fini(dev_priv);
 #else
-	ttm_bo_man_fini(&dev_priv->bdev,
-			ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM));
+	ttm_bo_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
 #endif
 }
 
@@ -856,14 +856,20 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
  * ttm_range_man_init
  *
  * @bdev: ttm device
- * @man: the manager to initialise with the range manager.
+ * @type: memory manager type
+ * @available_caching: TTM_PL_FLAG_* for allowed caching modes
+ * @default_caching: default caching mode
+ * @use_tt: if the memory manager uses tt
  * @p_size: size of area to be managed in pages.
  *
  * Initialise a generic range manager for the selected memory type.
  * The range manager is installed for this device in the type slot.
  */
 int ttm_range_man_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man,
+		       unsigned type,
+		       uint32_t available_caching,
+		       uint32_t default_caching,
+		       bool use_tt,
 		       unsigned long p_size);
 
 /**
@@ -875,7 +881,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
  * Remove the generic range manager from a slot and tear it down.
  */
 int ttm_range_man_fini(struct ttm_bo_device *bdev,
-		       struct ttm_mem_type_manager *man);
+		       unsigned type);
 
 /**
  * ttm_mem_type_manager_debug
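For a driver, the converted calling pattern is one init call per memory type and one fini call at teardown. A minimal sketch modelled on the callers above (the mydrv_* wrapper names and the vram_size argument are illustrative only, not part of this patch):

    /* Register a range-managed VRAM slot: type, allowed caching flags,
     * default caching, whether it backs TT (system) memory, and size in pages. */
    static int mydrv_vram_init(struct ttm_bo_device *bdev, uint64_t vram_size)
    {
            return ttm_range_man_init(bdev, TTM_PL_VRAM,
                                      TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC,
                                      TTM_PL_FLAG_WC, false,
                                      vram_size >> PAGE_SHIFT);
    }

    /* Tear the slot down again: only the device and the type index are needed. */
    static void mydrv_vram_fini(struct ttm_bo_device *bdev)
    {
            ttm_range_man_fini(bdev, TTM_PL_VRAM);
    }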