Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next

Some additional fixes for 4.9:
- The rest of Christian's GTT rework which fixes a long standing bug
  in the GPUVM code among other things
- Changes to the pci shutdown callbacks for certain hypervisors
- Fix hpd interrupt storms on eDP panels which have the hpd interrupt
  enabled by the bios
- misc cleanups and bug fixes

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (33 commits)
  drm/radeon: always apply pci shutdown callbacks
  drm/amdgpu: always apply pci shutdown callbacks (v2)
  drm/amdgpu: improve VM PTE trace points
  drm/amdgpu: fix GART_DEBUGFS define
  drm/amdgpu: free userptrs even if GTT isn't bound
  drm/amd/amdgpu: Various cleanups for DCEv6
  drm/amdgpu: fix BO move offsets
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/amdgpu: fix gtt_mgr bo's offset
  drm/amdgpu: fix initializing the VM BO shadow
  drm/amdgpu: fix initializing the VM last eviction counter
  drm/amdgpu: cleanup VM shadow BO unreferencing
  drm/amdgpu: allocate GTT space for shadow VM page tables
  drm/amdgpu: rename all rbo variable to abo v2
  drm/amdgpu: remove unused member from struct amdgpu_bo
  drm/amdgpu: add a custom GTT memory manager v2
  drm/amdgpu/dce6: disable hpd on local panels
  drm/amdgpu/dce8: disable hpd on local panels
  drm/amdgpu/dce11: disable hpd on local panels
  drm/amdgpu/dce10: disable hpd on local panels
  ...
commit 28a396545a
@@ -23,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
-	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+	amdgpu_gtt_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -445,8 +445,6 @@ struct amdgpu_bo_va {
 #define AMDGPU_GEM_DOMAIN_MAX 0x3
 
 struct amdgpu_bo {
-	/* Protected by gem.mutex */
-	struct list_head list;
 	/* Protected by tbo.reserved */
 	u32 prefered_domains;
 	u32 allowed_domains;
@@ -704,7 +702,7 @@ struct amdgpu_flip_work {
 	u32 target_vblank;
 	uint64_t base;
 	struct drm_pending_vblank_event *event;
-	struct amdgpu_bo *old_rbo;
+	struct amdgpu_bo *old_abo;
 	struct fence *excl;
 	unsigned shared_count;
 	struct fence **shared;
@@ -2417,7 +2415,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (!r && p->uf_entry.robj) {
 		struct amdgpu_bo *uf = p->uf_entry.robj;
 
-		r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 	}
 
@@ -1192,7 +1192,7 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->bo_list->num_entries; i++) {
 		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
 
-		r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 		if (unlikely(r))
 			return r;
 	}
@@ -60,6 +60,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 			amd_sched_entity_fini(&adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx->fences);
+		ctx->fences = NULL;
 		return r;
 	}
 	return 0;
@@ -77,6 +78,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
+	ctx->fences = NULL;
 
 	for (i = 0; i < adev->num_rings; i++)
 		amd_sched_entity_fini(&adev->rings[i]->sched,
@@ -123,17 +123,17 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_rbo, false);
+	r = amdgpu_bo_reserve(work->old_abo, false);
 	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_rbo);
+		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
 			DRM_ERROR("failed to unpin buffer after flip\n");
 		}
-		amdgpu_bo_unreserve(work->old_rbo);
+		amdgpu_bo_unreserve(work->old_abo);
 	} else
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	kfree(work->shared);
 	kfree(work);
 }
@@ -150,7 +150,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *new_amdgpu_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_flip_work *work;
-	struct amdgpu_bo *new_rbo;
+	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
@@ -173,28 +173,28 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	obj = old_amdgpu_fb->obj;
 
 	/* take a reference to the old object */
-	work->old_rbo = gem_to_amdgpu_bo(obj);
-	amdgpu_bo_ref(work->old_rbo);
+	work->old_abo = gem_to_amdgpu_bo(obj);
+	amdgpu_bo_ref(work->old_abo);
 
 	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
 	obj = new_amdgpu_fb->obj;
-	new_rbo = gem_to_amdgpu_bo(obj);
+	new_abo = gem_to_amdgpu_bo(obj);
 
 	/* pin the new buffer */
-	r = amdgpu_bo_reserve(new_rbo, false);
+	r = amdgpu_bo_reserve(new_abo, false);
 	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		DRM_ERROR("failed to reserve new abo buffer before flip\n");
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
 		r = -EINVAL;
-		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
 
-	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -202,8 +202,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+	amdgpu_bo_unreserve(new_abo);
 
 	work->base = base;
 	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
@@ -231,19 +231,19 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	return 0;
 
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
-		DRM_ERROR("failed to reserve new rbo in error path\n");
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
 		goto cleanup;
 	}
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
-		DRM_ERROR("failed to unpin new rbo in error path\n");
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+		DRM_ERROR("failed to unpin new abo in error path\n");
 	}
 unreserve:
-	amdgpu_bo_unreserve(new_rbo);
+	amdgpu_bo_unreserve(new_abo);
 
 cleanup:
-	amdgpu_bo_unref(&work->old_rbo);
+	amdgpu_bo_unref(&work->old_abo);
 	fence_put(work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		fence_put(work->shared[i]);
@@ -57,9 +57,10 @@
  * - 3.5.0 - Add support for new UVD_NO_OP register.
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
  * - 3.7.0 - Add support for VCE clock list packet
+ * - 3.8.0 - Add support raster config init in the kernel
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	7
+#define KMS_DRIVER_MINOR	8
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -480,14 +481,12 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 static void
 amdgpu_pci_shutdown(struct pci_dev *pdev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct amdgpu_device *adev = dev->dev_private;
-
 	/* if we are running in a VM, make sure the device
-	 * torn down properly on reboot/shutdown
+	 * torn down properly on reboot/shutdown.
+	 * unfortunately we can't detect certain
+	 * hypervisors so just do this all the time.
 	 */
-	if (amdgpu_passthrough(adev))
-		amdgpu_pci_remove(pdev);
+	amdgpu_pci_remove(pdev);
 }
 
 static int amdgpu_pmops_suspend(struct device *dev)
@@ -115,14 +115,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
 
 static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (likely(ret == 0)) {
-		amdgpu_bo_kunmap(rbo);
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_kunmap(abo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -133,7 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 {
 	struct amdgpu_device *adev = rfbdev->adev;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	bool fb_tiled = false; /* useful for testing */
 	u32 tiling_flags = 0;
 	int ret;
@@ -159,30 +159,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
		       aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
 
-	ret = amdgpu_bo_reserve(rbo, false);
+	ret = amdgpu_bo_reserve(abo, false);
 	if (unlikely(ret != 0))
 		goto out_unref;
 
 	if (tiling_flags) {
-		ret = amdgpu_bo_set_tiling_flags(rbo,
+		ret = amdgpu_bo_set_tiling_flags(abo,
 						 tiling_flags);
 		if (ret)
 			dev_err(adev->dev, "FB failed to set tiling flags\n");
 	}
 
 
-	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
 	if (ret) {
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unreserve(abo);
 		goto out_unref;
 	}
-	ret = amdgpu_bo_kmap(rbo, NULL);
-	amdgpu_bo_unreserve(rbo);
+	ret = amdgpu_bo_kmap(abo, NULL);
+	amdgpu_bo_unreserve(abo);
 	if (ret) {
 		goto out_unref;
 	}
@@ -204,7 +204,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct amdgpu_bo *rbo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	int ret;
 	unsigned long tmp;
 
@@ -223,7 +223,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 		return ret;
 	}
 
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = drm_fb_helper_alloc_fbi(helper);
@@ -246,7 +246,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	/* setup helper */
 	rfbdev->helper.fb = fb;
 
-	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+	memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
 
 	strcpy(info->fix.id, "amdgpudrmfb");
 
@@ -255,11 +255,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &amdgpufb_ops;
 
-	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
 	info->fix.smem_start = adev->mc.aper_base + tmp;
-	info->fix.smem_len = amdgpu_bo_size(rbo);
-	info->screen_base = rbo->kptr;
-	info->screen_size = amdgpu_bo_size(rbo);
+	info->fix.smem_len = amdgpu_bo_size(abo);
+	info->screen_base = abo->kptr;
+	info->screen_size = amdgpu_bo_size(abo);
 
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
@@ -276,7 +276,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
-	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
 	DRM_INFO("fb depth is %d\n", fb->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
@@ -286,7 +286,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_unref:
-	if (rbo) {
+	if (abo) {
 
 	}
 	if (fb && ret) {
@@ -454,6 +454,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
+		ring->fence_drv.fences = NULL;
 		ring->fence_drv.initialized = false;
 	}
 }
@@ -238,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = NULL;
 #endif
 		page_base = adev->dummy_page.addr;
@@ -286,7 +286,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 		adev->gart.pages[p] = pagelist[i];
 #endif
 		if (adev->gart.ptr) {
@@ -331,7 +331,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	/* Allocate pages table */
 	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
 	if (adev->gart.pages == NULL) {
@@ -357,7 +357,7 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
 		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
 	}
 	adev->gart.ready = false;
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	vfree(adev->gart.pages);
 	adev->gart.pages = NULL;
 #endif
@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  */
 int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = abo->adev;
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	r = amdgpu_bo_reserve(rbo, false);
+	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
 
-	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	bo_va = amdgpu_vm_bo_find(vm, abo);
 	if (!bo_va) {
-		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 	} else {
 		++bo_va->ref_count;
 	}
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_unreserve(abo);
 	return 0;
 }
 
@@ -528,7 +528,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		goto error_unreserve;
 
 	if (operation == AMDGPU_VA_OP_MAP)
-		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
 	struct ttm_validate_buffer tv, tv_pd;
 	struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gem_to_amdgpu_bo(gobj);
+	abo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
-	tv.bo = &rbo->tbo;
+	tv.bo = &abo->tbo;
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_gtt_mgr {
+	struct drm_mm mm;
+	spinlock_t lock;
+	uint64_t available;
+};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
+			       unsigned long p_size)
+{
+	struct amdgpu_gtt_mgr *mgr;
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	drm_mm_init(&mgr->mm, 0, p_size);
+	spin_lock_init(&mgr->lock);
+	mgr->available = p_size;
+	man->priv = mgr;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the GTT manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	if (!drm_mm_clean(&mgr->mm)) {
+		spin_unlock(&mgr->lock);
+		return -EBUSY;
+	}
+
+	drm_mm_takedown(&mgr->mm);
+	spin_unlock(&mgr->lock);
+	kfree(mgr);
+	man->priv = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_alloc - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate the address space for a node.
+ */
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
+	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned long fpfn, lpfn;
+	int r;
+
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		return 0;
+
+	if (place)
+		fpfn = place->fpfn;
+	else
+		fpfn = 0;
+
+	if (place && place->lpfn)
+		lpfn = place->lpfn;
+	else
+		lpfn = man->size;
+
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
+		sflags = DRM_MM_SEARCH_BELOW;
+		aflags = DRM_MM_CREATE_TOP;
+	}
+
+	spin_lock(&mgr->lock);
+	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
+						mem->page_alignment, 0,
+						fpfn, lpfn, sflags, aflags);
+	spin_unlock(&mgr->lock);
+
+	if (!r) {
+		mem->start = node->start;
+		if (&tbo->mem == mem)
+			tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
+			    tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
+	}
+
+	return r;
+}
+
+/**
+ * amdgpu_gtt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy, allocate the node but no space for it yet.
+ */
+static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+			      struct ttm_buffer_object *tbo,
+			      const struct ttm_place *place,
+			      struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node;
+	int r;
+
+	spin_lock(&mgr->lock);
+	if (mgr->available < mem->num_pages) {
+		spin_unlock(&mgr->lock);
+		return 0;
+	}
+	mgr->available -= mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->start = AMDGPU_BO_INVALID_OFFSET;
+	mem->mm_node = node;
+
+	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
+		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
+		if (unlikely(r)) {
+			kfree(node);
+			mem->mm_node = NULL;
+		}
+	} else {
+		mem->start = node->start;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: TTM memory object
+ *
+ * Free the allocated GTT again.
+ */
+static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
+			       struct ttm_mem_reg *mem)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (!node)
+		return;
+
+	spin_lock(&mgr->lock);
+	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(node);
+	mgr->available += mem->num_pages;
+	spin_unlock(&mgr->lock);
+
+	kfree(node);
+	mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_debug - dump VRAM table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+				 const char *prefix)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+
+	spin_lock(&mgr->lock);
+	drm_mm_debug_table(&mgr->mm, prefix);
+	spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
+	amdgpu_gtt_mgr_init,
+	amdgpu_gtt_mgr_fini,
+	amdgpu_gtt_mgr_new,
+	amdgpu_gtt_mgr_del,
+	amdgpu_gtt_mgr_debug
+};
@@ -158,8 +158,8 @@ static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
 };
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					  struct amdgpu_i2c_bus_rec *rec,
+					  const struct amdgpu_i2c_bus_rec *rec,
 					  const char *name)
 {
 	struct amdgpu_i2c_chan *i2c;
 	int ret;
@@ -249,8 +249,8 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
 
 /* Add additional buses */
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		    struct amdgpu_i2c_bus_rec *rec,
+		    const struct amdgpu_i2c_bus_rec *rec,
 		    const char *name)
 {
 	struct drm_device *dev = adev->ddev;
 	int i;
@@ -266,7 +266,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
 /* looks up bus based on id */
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		  struct amdgpu_i2c_bus_rec *i2c_bus)
+		  const struct amdgpu_i2c_bus_rec *i2c_bus)
 {
 	int i;
 
@@ -336,7 +336,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
 
 /* ddc router switching */
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
@@ -365,7 +365,7 @@ amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
 
 /* clock/data router switching */
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
 {
 	u8 val;
 
@@ -25,20 +25,20 @@
 #define __AMDGPU_I2C_H__
 
 struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
-					  struct amdgpu_i2c_bus_rec *rec,
+					  const struct amdgpu_i2c_bus_rec *rec,
 					  const char *name);
 void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
 void amdgpu_i2c_init(struct amdgpu_device *adev);
 void amdgpu_i2c_fini(struct amdgpu_device *adev);
 void amdgpu_i2c_add(struct amdgpu_device *adev,
-		    struct amdgpu_i2c_bus_rec *rec,
+		    const struct amdgpu_i2c_bus_rec *rec,
 		    const char *name);
 struct amdgpu_i2c_chan *
 amdgpu_i2c_lookup(struct amdgpu_device *adev,
-		  struct amdgpu_i2c_bus_rec *i2c_bus);
+		  const struct amdgpu_i2c_bus_rec *i2c_bus);
 void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector);
 void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector);
 
 #endif
@@ -203,10 +203,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
-	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
-				  rbo->placements, domain, rbo->flags);
+	amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+				  abo->placements, domain, abo->flags);
 }
 
 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -352,7 +352,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 		return r;
 	}
 	bo->adev = adev;
-	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->shadow_list);
 	INIT_LIST_HEAD(&bo->va);
 	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@@ -673,7 +672,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
-	r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
 	if (unlikely(r)) {
 		dev_err(bo->adev->dev, "%p bind failed\n", bo);
 		goto error;
@@ -850,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *new_mem)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return;
 
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
-	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(abo->adev, abo);
 
 	/* update statistics */
 	if (!new_mem)
 		return;
 
 	/* move_notify is called before move happens */
-	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+	amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
 
-	trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -264,6 +264,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 			      (void **)&ring->ring);
 
 	amdgpu_debugfs_ring_fini(ring);
+
+	ring->adev->rings[ring->idx] = NULL;
 }
 
 /*
@@ -247,7 +247,7 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
 	    TP_ARGS(mapping)
 );
 
-TRACE_EVENT(amdgpu_vm_set_page,
+TRACE_EVENT(amdgpu_vm_set_ptes,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
 		     uint32_t incr, uint32_t flags),
 	    TP_ARGS(pe, addr, count, incr, flags),
@@ -271,6 +271,24 @@ TRACE_EVENT(amdgpu_vm_set_page,
 		      __entry->flags, __entry->count)
 );
 
+TRACE_EVENT(amdgpu_vm_copy_ptes,
+	    TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+	    TP_ARGS(pe, src, count),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, src)
+			     __field(u32, count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->src = src;
+			   __entry->count = count;
+			   ),
+	    TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+		      __entry->pe, __entry->src, __entry->count)
+);
+
 TRACE_EVENT(amdgpu_vm_flush,
 	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
 	    TP_ARGS(pd_addr, ring, id),
@@ -160,7 +160,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->func = &ttm_bo_manager_func;
+		man->func = &amdgpu_gtt_mgr_func;
 		man->gpu_offset = adev->mc.gtt_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
@@ -195,7 +195,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	static struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
@@ -210,43 +210,43 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		placement->num_busy_placement = 1;
 		return;
 	}
-	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	abo = container_of(bo, struct amdgpu_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
-			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
-			for (i = 0; i < rbo->placement.num_placement; ++i) {
-				if (!(rbo->placements[i].flags &
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+			for (i = 0; i < abo->placement.num_placement; ++i) {
+				if (!(abo->placements[i].flags &
 				      TTM_PL_FLAG_TT))
 					continue;
 
-				if (rbo->placements[i].lpfn)
+				if (abo->placements[i].lpfn)
 					continue;
 
 				/* set an upper limit to force directly
 				 * allocating address space for the BO.
 				 */
-				rbo->placements[i].lpfn =
-					rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+				abo->placements[i].lpfn =
+					abo->adev->mc.gtt_size >> PAGE_SHIFT;
 			}
 		}
 		break;
 	case TTM_PL_TT:
 	default:
-		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 	}
-	*placement = rbo->placement;
+	*placement = abo->placement;
 }
 
 static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
 
 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 		return -EPERM;
-	return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+	return drm_vma_node_verify_access(&abo->gem_base.vma_node,
 					  filp->private_data);
 }
 
@@ -273,16 +273,15 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = (u64)old_mem->start << PAGE_SHIFT;
-	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo->ttm, old_mem);
+		r = amdgpu_ttm_bind(bo, old_mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
+		old_start = (u64)old_mem->start << PAGE_SHIFT;
 		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
 		break;
 	default:
@@ -291,11 +290,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	}
 	switch (new_mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo->ttm, new_mem);
+		r = amdgpu_ttm_bind(bo, new_mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
+		new_start = (u64)new_mem->start << PAGE_SHIFT;
 		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
 		break;
 	default:
@@ -676,7 +676,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 			return r;
 		}
 	}
-	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
 		     ttm->num_pages, bo_mem, ttm);
@@ -697,16 +696,25 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
 	return gtt && !list_empty(&gtt->list);
 }
 
-int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct ttm_tt *ttm = bo->ttm;
+	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
 	uint32_t flags;
 	int r;
 
 	if (!ttm || amdgpu_ttm_is_bound(ttm))
 		return 0;
 
+	r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
+				 NULL, bo_mem);
+	if (r) {
+		DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+		return r;
+	}
+
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
@@ -750,6 +758,9 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	if (gtt->userptr)
+		amdgpu_ttm_tt_unpin_userptr(ttm);
+
 	if (!amdgpu_ttm_is_bound(ttm))
 		return 0;
 
@@ -757,9 +768,6 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->adev->gart.ready)
 		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
 
-	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
-
 	spin_lock(&gtt->adev->gtt_list_lock);
 	list_del_init(&gtt->list);
 	spin_unlock(&gtt->adev->gtt_list_lock);
@@ -65,6 +65,13 @@ struct amdgpu_mman {
 	struct amdgpu_mman_lru guard;
 };
 
+extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *tbo,
+			 const struct ttm_place *place,
+			 struct ttm_mem_reg *mem);
+
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
@@ -78,6 +85,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
-int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
 
 #endif
@@ -351,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	}
 }
 
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
 {
 	int i;
-	for (i = 0; i < rbo->placement.num_placement; ++i) {
-		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
-		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; ++i) {
+		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
 }
 
@@ -210,6 +210,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
  */
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 {
+	unsigned i;
+
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
@@ -217,8 +219,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 
 	amdgpu_bo_unref(&adev->vce.vcpu_bo);
 
-	amdgpu_ring_fini(&adev->vce.ring[0]);
-	amdgpu_ring_fini(&adev->vce.ring[1]);
+	for (i = 0; i < adev->vce.num_rings; i++)
+		amdgpu_ring_fini(&adev->vce.ring[i]);
 
 	release_firmware(adev->vce.fw);
 	mutex_destroy(&adev->vce.idle_mutex);
@@ -303,9 +305,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vce.idle_work.work);
+	unsigned i, count = 0;
+
+	for (i = 0; i < adev->vce.num_rings; i++)
+		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
 
-	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
-	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+	if (count == 0) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
@ -487,7 +487,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 				  unsigned count, uint32_t incr,
 				  uint32_t flags)
 {
-	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
 	if (count < 3) {
 		amdgpu_vm_write_pte(params->adev, params->ib, pe,

@ -516,10 +516,12 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
-	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+	uint64_t src = (params->src + (addr >> 12) * 8);
 
-	amdgpu_vm_copy_pte(params->adev, params->ib, pe,
-			   (params->src + (addr >> 12) * 8), count);
+	trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 }
 
 /**

@ -552,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
+	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+	if (r)
+		goto error;
+
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 

@ -625,6 +631,11 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
 	if (!pd)
 		return 0;
 
+	r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+	if (r)
+		return r;
+
 	pd_addr = amdgpu_bo_gpu_offset(pd);
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 

@ -650,6 +661,14 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 		if (bo == NULL)
 			continue;
 
+		if (bo->shadow) {
+			struct amdgpu_bo *shadow = bo->shadow;
+
+			r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+			if (r)
+				return r;
+		}
+
 		pt = amdgpu_bo_gpu_offset(bo);
 		if (!shadow) {
 			if (vm->page_tables[pt_idx].addr == pt)

@ -1000,6 +1019,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 					    AMDGPU_GPU_PAGE_SIZE);
 			pte[i] |= flags;
 		}
+		addr = 0;
 	}
 
 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);

@ -1412,10 +1432,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 		r = amdgpu_vm_clear_bo(adev, vm, pt);
 		if (r) {
+			amdgpu_bo_unref(&pt->shadow);
 			amdgpu_bo_unref(&pt);
 			goto error_free;
 		}
 
+		if (pt->shadow) {
+			r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+			if (r) {
+				amdgpu_bo_unref(&pt->shadow);
+				amdgpu_bo_unref(&pt);
+				goto error_free;
+			}
+		}
+
 		entry->robj = pt;
 		entry->priority = 0;
 		entry->tv.bo = &entry->robj->tbo;

@ -1610,14 +1640,25 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		goto error_free_page_directory;
 
 	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-	amdgpu_bo_unreserve(vm->page_directory);
 	if (r)
-		goto error_free_page_directory;
+		goto error_unreserve;
+
+	if (vm->page_directory->shadow) {
+		r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+		if (r)
+			goto error_unreserve;
+	}
+
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+	amdgpu_bo_unreserve(vm->page_directory);
 
 	return 0;
 
+error_unreserve:
+	amdgpu_bo_unreserve(vm->page_directory);
+
 error_free_page_directory:
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	vm->page_directory = NULL;

@ -1660,15 +1701,17 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		if (vm->page_tables[i].entry.robj &&
-		    vm->page_tables[i].entry.robj->shadow)
-			amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
-		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+		struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+		if (!pt)
+			continue;
+
+		amdgpu_bo_unref(&pt->shadow);
+		amdgpu_bo_unref(&pt);
 	}
 	drm_free_large(vm->page_tables);
 
-	if (vm->page_directory->shadow)
-		amdgpu_bo_unref(&vm->page_directory->shadow);
+	amdgpu_bo_unref(&vm->page_directory->shadow);
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 }

@ -427,16 +427,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;

@ -460,6 +450,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

@ -2104,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);

@ -2132,23 +2135,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 

@ -2323,12 +2326,12 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */

@ -2808,16 +2811,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
-
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			idx = 0;

@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 			continue;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+			continue;
+		}
+
 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

@ -2085,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);

@ -2113,23 +2116,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 

@ -2304,12 +2307,12 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */

@ -2824,16 +2827,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

@ -375,15 +375,6 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(DC_HPD1_CONTROL, tmp);

@ -406,6 +397,45 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 		default:
 			break;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}

@ -1475,10 +1505,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
 	u32 vga_control;
 
 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
-	if (enable)
-		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
-	else
-		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
 }
 
 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)

@ -1487,10 +1514,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (enable)
-		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
-	else
-		WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
 
 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,

@ -1503,7 +1527,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
 	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);

@ -1520,8 +1544,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (atomic) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		target_fb = fb;
-	}
-	else {
+	} else {
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		target_fb = crtc->primary->fb;
 	}

@ -1530,23 +1553,23 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic)
-		fb_location = amdgpu_bo_gpu_offset(rbo);
-	else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (atomic) {
+		fb_location = amdgpu_bo_gpu_offset(abo);
+	} else {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	switch (target_fb->pixel_format) {
 	case DRM_FORMAT_C8:

@ -1633,8 +1656,9 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
 		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
 		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
-	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+	}
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);

@ -1698,12 +1722,12 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */

@ -1798,26 +1822,13 @@ static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
 
 	switch (amdgpu_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-		if (dig->linkb)
-			return 1;
-		else
-			return 0;
-		break;
+		return dig->linkb ? 1 : 0;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		if (dig->linkb)
-			return 3;
-		else
-			return 2;
-		break;
+		return dig->linkb ? 3 : 2;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		if (dig->linkb)
-			return 5;
-		else
-			return 4;
-		break;
+		return dig->linkb ? 5 : 4;
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
 		return 6;
-		break;
 	default:
 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
 		return 0;

@ -2052,7 +2063,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 					  amdgpu_crtc->cursor_y);
 
 		dce_v6_0_show_cursor(crtc);
-
 		dce_v6_0_lock_cursor(crtc, false);
 	}
 }

@ -2151,16 +2161,16 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

@ -2375,15 +2385,11 @@ static int dce_v6_0_sw_init(void *handle)
 	adev->mode_info.mode_config_initialized = true;
 
 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-
 	adev->ddev->mode_config.async_page_flip = true;
-
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
-
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-
 	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
 
 	r = amdgpu_modeset_create_props(adev);

@ -2429,7 +2435,6 @@ static int dce_v6_0_sw_fini(void *handle)
 	drm_kms_helper_poll_fini(adev->ddev);
 
 	dce_v6_0_audio_fini(adev);
-
 	dce_v6_0_afmt_fini(adev);
 
 	drm_mode_config_cleanup(adev->ddev);

@ -3057,7 +3062,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
 	}
 
 	amdgpu_encoder->enc_priv = NULL;
-
 	amdgpu_encoder->encoder_enum = encoder_enum;
 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
 	amdgpu_encoder->devices = supported_device;

@ -397,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS avoid breaking the
-			 * aux dp channel on imac and help (but not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * also avoid interrupt storms during dpms.
-			 */
-			continue;
-		}
 		switch (amdgpu_connector->hpd.hpd) {
 		case AMDGPU_HPD_1:
 			WREG32(mmDC_HPD1_CONTROL, tmp);

@ -428,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 		default:
 			break;
 		}
 
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking the
+			 * aux dp channel on imac and help (but not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+			switch (amdgpu_connector->hpd.hpd) {
+			case AMDGPU_HPD_1:
+				dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_2:
+				dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_3:
+				dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_4:
+				dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_5:
+				dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+				break;
+			case AMDGPU_HPD_6:
+				dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+				break;
+			default:
+				continue;
+			}
+
+			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+			dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+			continue;
+		}
+
 		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}

@ -1992,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_framebuffer *amdgpu_fb;
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
-	struct amdgpu_bo *rbo;
+	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels;
 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);

@ -2020,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = amdgpu_fb->obj;
-	rbo = gem_to_amdgpu_bo(obj);
-	r = amdgpu_bo_reserve(rbo, false);
+	abo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(abo, false);
 	if (unlikely(r != 0))
 		return r;
 
 	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(rbo);
+		fb_location = amdgpu_bo_gpu_offset(abo);
 	} else {
-		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
 		if (unlikely(r != 0)) {
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
 
-	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-	amdgpu_bo_unreserve(rbo);
+	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+	amdgpu_bo_unreserve(abo);
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
 

@ -2192,12 +2222,12 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r != 0))
 			return r;
-		amdgpu_bo_unpin(rbo);
-		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unpin(abo);
+		amdgpu_bo_unreserve(abo);
 	}
 
 	/* Bytes per pixel may have changed */

@ -2669,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 	/* disable the GRPH */

@ -229,16 +229,16 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	if (crtc->primary->fb) {
 		int r;
 		struct amdgpu_framebuffer *amdgpu_fb;
-		struct amdgpu_bo *rbo;
+		struct amdgpu_bo *abo;
 
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
-		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(rbo, false);
+		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+		r = amdgpu_bo_reserve(abo, false);
 		if (unlikely(r))
-			DRM_ERROR("failed to reserve rbo before unpin\n");
+			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
-			amdgpu_bo_unpin(rbo);
-			amdgpu_bo_unreserve(rbo);
+			amdgpu_bo_unpin(abo);
+			amdgpu_bo_unreserve(abo);
 		}
 	}
 

@ -3023,9 +3023,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };

@ -3486,6 +3489,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 		++p;
 	}
+	/* limit mclk on all R7 370 parts for stability */
+	if (adev->pdev->device == 0x6811 &&
+	    adev->pdev->revision == 0x81)
+		max_mclk = 120000;
+	/* limit sclk/mclk on Jet parts for stability */
+	if (adev->pdev->device == 0x6665 &&
+	    adev->pdev->revision == 0xc3) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;

@ -4580,7 +4593,7 @@ static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
 	    &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
 		si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
 
-		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
 			cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
 
 		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,

@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
 #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
 
 struct SISLANDS_SMC_VOLTAGEMASKTABLE

@ -716,7 +716,8 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 	int i;
 
 	if ((adev->asic_type == CHIP_POLARIS10) ||
-	    (adev->asic_type == CHIP_TONGA))
+	    (adev->asic_type == CHIP_TONGA) ||
+	    (adev->asic_type == CHIP_FIJI))
 		vce_v3_0_set_bypass_mode(adev, enable);
 
 	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))

@ -4278,7 +4278,7 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
 	return 0;
 }
 
-static struct pp_hwmgr_func smu7_hwmgr_funcs = {
+static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.backend_init = &smu7_hwmgr_backend_init,
 	.backend_fini = &phm_hwmgr_backend_fini,
 	.asic_setup = &smu7_setup_asic_task,

@ -31,7 +31,7 @@
 
 static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
 
-static struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
 /* ---------------------------------------------------------------------------------------
  * Offset    Mask    Shift    Value    Type
  * ---------------------------------------------------------------------------------------

@ -62,7 +62,7 @@ static struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
 	{   0xFFFFFFFF  }
 };
 
-static struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
 /* ---------------------------------------------------------------------------------------
  * Offset    Mask    Shift    Value    Type
  * ---------------------------------------------------------------------------------------

@ -93,7 +93,7 @@ static struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
 	{   0xFFFFFFFF  }
 };
 
-static struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
 /* ---------------------------------------------------------------------------------------
 * Offset    Mask    Shift    Value    Type
 * ---------------------------------------------------------------------------------------

@ -235,7 +235,7 @@ static struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
 	{   0xFFFFFFFF  }
 };
 
-static struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
 /* ---------------------------------------------------------------------------------------
 * Offset    Mask    Shift    Value    Type
 * ---------------------------------------------------------------------------------------

@ -427,9 +427,9 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
 }
 
 static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
-				struct gpu_pt_config_reg *cac_config_regs)
+				const struct gpu_pt_config_reg *cac_config_regs)
 {
-	struct gpu_pt_config_reg *config_regs = cac_config_regs;
+	const struct gpu_pt_config_reg *config_regs = cac_config_regs;
 	uint32_t cache = 0;
 	uint32_t data = 0;

@ -63,7 +63,7 @@
 #define DEVICE_ID_VI_ICELAND_M_6902	0x6902
 #define DEVICE_ID_VI_ICELAND_M_6903	0x6903
 
-static struct iceland_pt_defaults defaults_iceland = {
+static const struct iceland_pt_defaults defaults_iceland = {
 	/*
 	 * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
 	 * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT

@ -74,7 +74,7 @@ static struct iceland_pt_defaults defaults_iceland = {
 };
 
 /* 35W - XT, XTL */
-static struct iceland_pt_defaults defaults_icelandxt = {
+static const struct iceland_pt_defaults defaults_icelandxt = {
 	/*
 	 * sviLoadLIneEn, SviLoadLineVddC,
 	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,

@ -87,7 +87,7 @@ static struct iceland_pt_defaults defaults_icelandxt = {
 };
 
 /* 25W - PRO, LE */
-static struct iceland_pt_defaults defaults_icelandpro = {
+static const struct iceland_pt_defaults defaults_icelandpro = {
 	/*
 	 * sviLoadLIneEn, SviLoadLineVddC,
 	 * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,

@ -1740,11 +1740,11 @@ static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
-	struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+	const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
 	SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
 	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
 	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
-	uint16_t *def1, *def2;
+	const uint16_t *def1, *def2;
 	int i, j, k;
 

@ -62,7 +62,7 @@ struct iceland_smumgr {
 	struct SMU71_Discrete_DpmTable smc_state_table;
 	struct SMU71_Discrete_PmFuses power_tune_table;
 	struct SMU71_Discrete_Ulv ulv_setting;
-	struct iceland_pt_defaults *power_tune_defaults;
+	const struct iceland_pt_defaults *power_tune_defaults;
 	SMU71_Discrete_MCRegisters mc_regs;
 	struct iceland_mc_reg_table mc_reg_table;
 	uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];

@ -58,7 +58,7 @@
 #define VDDC_VDDCI_DELTA 200
 
 
-static struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
  * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
  */

@ -1815,14 +1815,13 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
 {
 	struct tonga_smumgr *smu_data =
 			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
-	struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
 	SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
 	int i, j, k;
-	uint16_t *pdef1;
-	uint16_t *pdef2;
+	const uint16_t *pdef1, *pdef2;
 
 	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
 			(uint16_t)(cac_dtp_table->usTDP * 256));

@ -1863,7 +1862,7 @@ static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
 {
 	struct tonga_smumgr *smu_data =
 			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
-	struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
 
 	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
 	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;

@ -1878,7 +1877,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
 	uint16_t tdc_limit;
 	struct tonga_smumgr *smu_data =
 			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
-	struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 

@ -1899,7 +1898,7 @@ static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
 {
 	struct tonga_smumgr *smu_data =
 			(struct tonga_smumgr *)(hwmgr->smumgr->backend);
-	struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+	const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
 	uint32_t temp;
 
 	if (smu7_read_smc_sram_dword(hwmgr->smumgr,

@ -48,7 +48,7 @@ struct tonga_smumgr {
 	struct SMU72_Discrete_DpmTable smc_state_table;
 	struct SMU72_Discrete_Ulv ulv_setting;
 	struct SMU72_Discrete_PmFuses power_tune_table;
-	struct tonga_pt_defaults *power_tune_defaults;
+	const struct tonga_pt_defaults *power_tune_defaults;
 	SMU72_Discrete_MCRegisters mc_regs;
 	struct tonga_mc_reg_table mc_reg_table;

@ -365,10 +365,11 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
 	/* if we are running in a VM, make sure the device
-	 * torn down properly on reboot/shutdown
+	 * torn down properly on reboot/shutdown.
+	 * unfortunately we can't detect certain
+	 * hypervisors so just do this all the time.
 	 */
-	if (radeon_device_is_virtual())
-		radeon_pci_remove(pdev);
+	radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)

@ -4106,7 +4106,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
 	    &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
 		si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
 
-		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+		table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
 			cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
 
 		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,

@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
 #define SISLANDS_SMC_VOLTAGEMASK_MAX   4
 
 struct SISLANDS_SMC_VOLTAGEMASKTABLE