Merge tag 'drm-fixes-for-v4.7-rc4' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "The main drm fixes pull for rc4: one regression fix in the connector
  refcounting, and an MST fix. The rest is nouveau, amdkfd, i915,
  etnaviv, and radeon/amdgpu fixes, mostly regression or black screen
  fixes"

* tag 'drm-fixes-for-v4.7-rc4' of git://people.freedesktop.org/~airlied/linux: (23 commits)
  drm/etnaviv: initialize iommu domain page size
  drm/nouveau/iccsense: fix memory leak
  drm/nouveau/Revert "drm/nouveau/device/pci: set as non-CPU-coherent on ARM64"
  drm/amd/powerplay: select samu dpm 0 as boot level on polaris.
  drm/amd/powerplay: update powerplay table parsing
  drm/dp/mst: Always clear proposed vcpi table for port.
  drm/crtc: only store the necessary data for set_config rollback
  drm/crtc: fix connector reference counting mismatch in drm_crtc_helper_set_config
  drm/i915/ilk: Don't disable SSC source if it's in use
  Revert "drm/amdgpu: add pipeline sync while vmid switch in same ctx"
  drm/amdgpu/gfx7: fix broken condition check
  drm/radeon: fix asic initialization for virtualized environments
  amdgpu: fix asic initialization for virtualized environments (v2)
  drm/radeon: don't use fractional dividers on RS[78]80 if SS is enabled
  drm/radeon: do not hard reset GPU while freezing on r600/r700 family
  drm/i915: Extract physical display dimensions from VBT
  drm/i915: Check VBT for port presence in addition to the strap on VLV/CHV
  drm/i915: Only ignore eDP ports that are connected
  drm/i915: Silence "unexpected child device config size" for VBT on 845g
  drm/i915: Fix NULL pointer deference when out of PLLs in IVB
  ...
commit d325ea8594
@@ -799,7 +799,6 @@ struct amdgpu_ring {
 	unsigned		cond_exe_offs;
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
-	int			vmid;
 };
 
 /*
@@ -937,8 +936,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size,
-		    bool vmid_switch);
+		    uint32_t oa_base, uint32_t oa_size);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1822,6 +1820,8 @@ struct amdgpu_asic_funcs {
 	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+	/* query virtual capabilities */
+	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1916,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 
 /* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN	(1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF		(1 << 1)
 struct amdgpu_virtualization {
 	bool supports_sr_iov;
+	bool is_virtual;
+	u32 caps;
 };
 
 /*
@@ -2206,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
@@ -1385,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
+#endif
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -1519,8 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->virtualization.supports_sr_iov =
 		amdgpu_atombios_has_gpu_virtualization_table(adev);
 
+	/* Check if we are executing in a virtualized environment */
+	adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
 	/* Post card if necessary */
-	if (!amdgpu_card_posted(adev)) {
+	if (!amdgpu_card_posted(adev) ||
+	    (adev->virtualization.is_virtual &&
+	     !adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
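Note: the amdgpu_device_is_virtual() helper added above leans on the kernel's X86_FEATURE_HYPERVISOR flag, which mirrors CPUID leaf 1, ECX bit 31. A minimal user-space sketch of the same check (illustrative only; running_under_hypervisor is a made-up name, not part of the patch):

#include <cpuid.h>
#include <stdbool.h>

/* CPUID leaf 1 sets ECX bit 31 when running under a hypervisor. */
static bool running_under_hypervisor(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	return (ecx >> 31) & 1;
}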
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
-	int vmid = 0, old_vmid = ring->vmid;
 	struct fence *hwf;
 	uint64_t ctx;
 
@@ -136,11 +135,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job) {
 		vm = job->vm;
 		ctx = job->ctx;
-		vmid = job->vm_id;
 	} else {
 		vm = NULL;
 		ctx = 0;
-		vmid = 0;
 	}
 
 	if (!ring->ready) {
@@ -166,8 +163,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
 				    job->gds_base, job->gds_size,
 				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size,
-				    (ring->current_ctx == ctx) && (old_vmid != vmid));
+				    job->oa_base, job->oa_size);
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
@@ -184,6 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	need_ctx_switch = ring->current_ctx != ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
+
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
 			continue;
@@ -191,7 +188,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
 				    need_ctx_switch);
 		need_ctx_switch = false;
-		ring->vmid = vmid;
 	}
 
 	if (ring->funcs->emit_hdp_invalidate)
@@ -202,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		if (job && job->vm_id)
 			amdgpu_vm_reset_id(adev, job->vm_id);
-		ring->vmid = old_vmid;
 		amdgpu_ring_undo(ring);
 		return r;
 	}
@@ -298,8 +298,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size,
-		    bool vmid_switch)
+		    uint32_t oa_base, uint32_t oa_size)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -313,7 +312,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 	int r;
 
 	if (ring->funcs->emit_pipeline_sync && (
-	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
+	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
+	    ring->type == AMDGPU_RING_TYPE_COMPUTE))
 		amdgpu_ring_emit_pipeline_sync(ring);
 
 	if (ring->funcs->emit_vm_flush &&
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+	/* CIK does not support SR-IOV */
+	return 0;
+}
+
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
@@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.get_xclk = &cik_get_xclk,
 	.set_uvd_clocks = &cik_set_uvd_clocks,
 	.set_vce_clocks = &cik_set_vce_clocks,
+	.get_virtual_caps = &cik_get_virtual_caps,
 	/* these should be moved to their own ip modules */
 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
 	.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
@@ -4833,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
 	case 2:
 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 			ring = &adev->gfx.compute_ring[i];
-			if ((ring->me == me_id) & (ring->pipe == pipe_id))
+			if ((ring->me == me_id) && (ring->pipe == pipe_id))
 				amdgpu_fence_process(ring);
 		}
 		break;
@@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+	u32 caps = 0;
+	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+		caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+	return caps;
+}
+
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
 	{mmGB_MACROTILE_MODE7, true},
 };
@@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.get_xclk = &vi_get_xclk,
 	.set_uvd_clocks = &vi_set_uvd_clocks,
 	.set_vce_clocks = &vi_set_vce_clocks,
+	.get_virtual_caps = &vi_get_virtual_caps,
 	/* these should be moved to their own ip modules */
 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
 	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 	pqm_uninit(&p->pqm);
 
 	/* Iterate over all process device data structure and check
-	 * if we should reset all wavefronts */
-	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+	 * if we should delete debug managers and reset all wavefronts
+	 */
+	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+		if ((pdd->dev->dbgmgr) &&
+				(pdd->dev->dbgmgr->pasid == p->pasid))
+			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
 		if (pdd->reset_wavefronts) {
 			pr_warn("amdkfd: Resetting all wave fronts\n");
 			dbgdev_wave_reset_wavefronts(pdd->dev, p);
 			pdd->reset_wavefronts = false;
 		}
+	}
 
 	mutex_unlock(&p->mutex);
 
@@ -404,13 +410,17 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 
 	idx = srcu_read_lock(&kfd_processes_srcu);
 
+	/*
+	 * Look for the process that matches the pasid. If there is no such
+	 * process, we either released it in amdkfd's own notifier, or there
+	 * is a bug. Unfortunately, there is no way to tell...
+	 */
 	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-		if (p->pasid == pasid)
-			break;
+		if (p->pasid == pasid) {
 
 	srcu_read_unlock(&kfd_processes_srcu, idx);
 
-	BUG_ON(p->pasid != pasid);
+	pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
 	mutex_lock(&p->mutex);
 
@@ -432,14 +442,20 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 	}
 
 	/*
-	 * Just mark pdd as unbound, because we still need it to call
-	 * amd_iommu_unbind_pasid() in when the process exits.
+	 * Just mark pdd as unbound, because we still need it
+	 * to call amd_iommu_unbind_pasid() in when the
+	 * process exits.
 	 * We don't call amd_iommu_unbind_pasid() here
 	 * because the IOMMU called us.
 	 */
 	pdd->bound = false;
 
 	mutex_unlock(&p->mutex);
+
+		return;
+	}
+
+	srcu_read_unlock(&kfd_processes_srcu, idx);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			dev->node_props.simd_count);
 
 	if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
-		pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+		pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
 			dev->node_props.mem_banks_count,
 			dev->mem_bank_count);
 		sysfs_show_32bit_prop(buffer, "mem_banks_count",
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
 	uint8_t phases;
 	uint8_t cks_enable;
 	uint8_t cks_voffset;
+	uint32_t sclk_offset;
 };
 
 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
@@ -3520,10 +3520,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
 	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
 			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-	ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-			(ATOM_Tonga_SCLK_Dependency_Table *)
+	PPTable_Generic_SubTable_Header *sclk_dep_table =
+			(PPTable_Generic_SubTable_Header *)
 			(((unsigned long)powerplay_table) +
 			le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
 	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
 			(ATOM_Tonga_MCLK_Dependency_Table *)
 			(((unsigned long)powerplay_table) +
@@ -3575,7 +3576,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 	/* Performance levels are arranged from low to high. */
 	performance_level->memory_clock = mclk_dep_table->entries
 			[state_entry->ucMemoryClockIndexLow].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexLow].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
 			[state_entry->ucEngineClockIndexLow].ulSclk;
 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 			state_entry->ucPCIEGenLow);
@@ -3586,8 +3591,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 			[polaris10_power_state->performance_level_count++]);
 	performance_level->memory_clock = mclk_dep_table->entries
 			[state_entry->ucMemoryClockIndexHigh].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
+
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
 			[state_entry->ucEngineClockIndexHigh].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexHigh].ulSclk;
+
 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 			state_entry->ucPCIEGenHigh);
 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
@@ -3645,7 +3656,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	switch (state->classification.ui_label) {
 	case PP_StateUILabel_Performance:
 		data->use_pcie_performance_levels = true;
-
 		for (i = 0; i < ps->performance_level_count; i++) {
 			if (data->pcie_gen_performance.max <
 					ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3671,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 					ps->performance_levels[i].pcie_lane)
 				data->pcie_lane_performance.max =
 					ps->performance_levels[i].pcie_lane;
-
 			if (data->pcie_lane_performance.min >
 					ps->performance_levels[i].pcie_lane)
 				data->pcie_lane_performance.min =
@@ -4187,12 +4196,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 
 	if (!bgate) {
-		data->smc_state_table.SamuBootLevel =
-			(uint8_t) (table_info->mm_dep_table->count - 1);
+		data->smc_state_table.SamuBootLevel = 0;
 		mm_boot_level_offset = data->dpm_table_start +
 				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
 		mm_boot_level_offset /= 4;
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
 	ATOM_Tonga_SCLK_Dependency_Record entries[1];	/* Dynamically allocate entries. */
 } ATOM_Tonga_SCLK_Dependency_Table;
 
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+	UCHAR ucVddInd;		/* Base voltage */
+	USHORT usVddcOffset;	/* Offset relative to base voltage */
+	ULONG ulSclk;
+	USHORT usEdcCurrent;
+	UCHAR ucReliabilityTemperature;
+	UCHAR ucCKSVOffsetandDisable;	/* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+	ULONG ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+	UCHAR ucRevId;
+	UCHAR ucNumEntries;	/* Number of entries. */
+	ATOM_Polaris_SCLK_Dependency_Record entries[1];	/* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
 typedef struct _ATOM_Tonga_PCIE_Record {
 	UCHAR ucPCIEGenSpeed;
 	UCHAR usPCIELaneWidth;
@@ -408,17 +408,21 @@ static int get_mclk_voltage_dependency_table(
 static int get_sclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-		const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+		const PPTable_Generic_SubTable_Header *sclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
 
-	PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
+	if (sclk_dep_table->ucRevId < 1) {
+		const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+			(ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+
+		PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
 			"Invalid PowerPlay Table!", return -1);
 
 		table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-			* sclk_dep_table->ucNumEntries;
+			* tonga_table->ucNumEntries;
 
 		sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
 			kzalloc(table_size, GFP_KERNEL);
@@ -428,21 +432,54 @@ static int get_sclk_voltage_dependency_table(
 
 		memset(sclk_table, 0x00, table_size);
 
-	sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
+		sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
 
-	for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
+		for (i = 0; i < tonga_table->ucNumEntries; i++) {
 			sclk_table->entries[i].vddInd =
-			sclk_dep_table->entries[i].ucVddInd;
+				tonga_table->entries[i].ucVddInd;
 			sclk_table->entries[i].vdd_offset =
-			sclk_dep_table->entries[i].usVddcOffset;
+				tonga_table->entries[i].usVddcOffset;
 			sclk_table->entries[i].clk =
-			sclk_dep_table->entries[i].ulSclk;
+				tonga_table->entries[i].ulSclk;
 			sclk_table->entries[i].cks_enable =
-			(((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+				(((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
 			sclk_table->entries[i].cks_voffset =
-			(sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+				(tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
 		}
+	} else {
+		const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+			(ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+		PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+			"Invalid PowerPlay Table!", return -1);
+
+		table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+			* polaris_table->ucNumEntries;
+
+		sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+			kzalloc(table_size, GFP_KERNEL);
+
+		if (NULL == sclk_table)
+			return -ENOMEM;
+
+		memset(sclk_table, 0x00, table_size);
+
+		sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+		for (i = 0; i < polaris_table->ucNumEntries; i++) {
+			sclk_table->entries[i].vddInd =
+				polaris_table->entries[i].ucVddInd;
+			sclk_table->entries[i].vdd_offset =
+				polaris_table->entries[i].usVddcOffset;
+			sclk_table->entries[i].clk =
+				polaris_table->entries[i].ulSclk;
+			sclk_table->entries[i].cks_enable =
+				(((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table->entries[i].cks_voffset =
+				(polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+			sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+		}
+	}
 	*pp_tonga_sclk_dep_table = sclk_table;
 
 	return 0;
@@ -708,8 +745,8 @@ static int init_clock_voltage_dependency(
 	const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
 		(const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-	const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-		(const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+	const PPTable_Generic_SubTable_Header *sclk_dep_table =
+		(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
 		le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
 	const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
 		(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
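Note: the parsing change above works because every powerplay subtable begins with a common header, so ucRevId can be inspected before casting to the revision-specific layout. A stripped-down, self-contained sketch of that dispatch pattern (hypothetical simplified structs, not the real ATOM layouts):

#include <stdio.h>

struct generic_header { unsigned char rev_id; };
struct table_v0 { struct generic_header hdr; int clk; };
struct table_v1 { struct generic_header hdr; int clk; int clk_offset; };

/* Check the shared revision byte first, then cast to the matching layout. */
static int read_clk(const struct generic_header *hdr)
{
	if (hdr->rev_id == 0)
		return ((const struct table_v0 *)hdr)->clk;
	return ((const struct table_v1 *)hdr)->clk;
}

int main(void)
{
	struct table_v1 t = { { 1 }, 300, 10 };
	printf("%d\n", read_clk(&t.hdr));	/* prints 300 */
	return 0;
}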
@@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
-	struct drm_crtc *new_crtc;
-	struct drm_encoder *save_encoders, *new_encoder, *encoder;
+	struct drm_crtc **save_encoder_crtcs, *new_crtc;
+	struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
 	bool mode_changed = false; /* if true do a full mode set */
 	bool fb_changed = false; /* if true and !mode_changed just do a flip */
-	struct drm_connector *save_connectors, *connector;
+	struct drm_connector *connector;
 	int count = 0, ro, fail = 0;
 	const struct drm_crtc_helper_funcs *crtc_funcs;
 	struct drm_mode_set save_set;
@@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	 * Allocate space for the backup of all (non-pointer) encoder and
 	 * connector data.
 	 */
-	save_encoders = kzalloc(dev->mode_config.num_encoder *
-				sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!save_encoders)
+	save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+				sizeof(struct drm_crtc *), GFP_KERNEL);
+	if (!save_encoder_crtcs)
 		return -ENOMEM;
 
-	save_connectors = kzalloc(dev->mode_config.num_connector *
-				sizeof(struct drm_connector), GFP_KERNEL);
-	if (!save_connectors) {
-		kfree(save_encoders);
+	save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+				sizeof(struct drm_encoder *), GFP_KERNEL);
+	if (!save_connector_encoders) {
+		kfree(save_encoder_crtcs);
 		return -ENOMEM;
 	}
 
@@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	 */
 	count = 0;
 	drm_for_each_encoder(encoder, dev) {
-		save_encoders[count++] = *encoder;
+		save_encoder_crtcs[count++] = encoder->crtc;
 	}
 
 	count = 0;
 	drm_for_each_connector(connector, dev) {
-		save_connectors[count++] = *connector;
+		save_connector_encoders[count++] = connector->encoder;
 	}
 
 	save_set.crtc = set->crtc;
@@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 	}
 
-	/* take a reference on all connectors in set */
+	/* take a reference on all unbound connectors in set, reuse the
+	 * already taken reference for bound connectors
+	 */
 	for (ro = 0; ro < set->num_connectors; ro++) {
+		if (set->connectors[ro]->encoder)
+			continue;
 		drm_connector_reference(set->connectors[ro]);
 	}
 
@@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		}
 	}
 
-	/* after fail drop reference on all connectors in save set */
-	count = 0;
-	drm_for_each_connector(connector, dev) {
-		drm_connector_unreference(&save_connectors[count++]);
-	}
-
-	kfree(save_connectors);
-	kfree(save_encoders);
+	kfree(save_connector_encoders);
+	kfree(save_encoder_crtcs);
 	return 0;
 
 fail:
 	/* Restore all previous data. */
 	count = 0;
 	drm_for_each_encoder(encoder, dev) {
-		*encoder = save_encoders[count++];
+		encoder->crtc = save_encoder_crtcs[count++];
 	}
 
 	count = 0;
 	drm_for_each_connector(connector, dev) {
-		*connector = save_connectors[count++];
+		connector->encoder = save_connector_encoders[count++];
 	}
 
-	/* after fail drop reference on all connectors in set */
+	/* after fail drop reference on all unbound connectors in set, let
+	 * bound connectors keep their reference
+	 */
 	for (ro = 0; ro < set->num_connectors; ro++) {
+		if (set->connectors[ro]->encoder)
+			continue;
 		drm_connector_unreference(set->connectors[ro]);
 	}
 
@@ -787,8 +789,8 @@ fail:
 				   save_set.y, save_set.fb))
 		DRM_ERROR("failed to restore config after modeset failure\n");
 
-	kfree(save_connectors);
-	kfree(save_encoders);
+	kfree(save_connector_encoders);
+	kfree(save_encoder_crtcs);
 	return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
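Note: the rollback fix above boils down to snapshotting only the crtc/encoder link pointers instead of copying whole objects, which had also duplicated their embedded reference counts. A generic sketch of that pattern (hypothetical types; not the DRM API):

#include <stdlib.h>

struct crtc;
struct encoder { struct crtc *crtc; };

/* Back up only the link pointers, never whole objects, so embedded
 * reference counts are not copied along. */
static struct crtc **save_links(const struct encoder *encoders, size_t n)
{
	struct crtc **saved = calloc(n, sizeof(*saved));
	size_t i;

	if (!saved)
		return NULL;
	for (i = 0; i < n; i++)
		saved[i] = encoders[i].crtc;
	return saved;
}

/* On failure, point the links back; no reference count ever moved. */
static void restore_links(struct encoder *encoders,
			  struct crtc **saved, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		encoders[i].crtc = saved[i];
}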
@@ -2927,12 +2927,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		drm_dp_port_teardown_pdt(port, port->pdt);
 
 		if (!port->input && port->vcpi.vcpi > 0) {
-			if (mgr->mst_state) {
-				drm_dp_mst_reset_vcpi_slots(mgr, port);
-				drm_dp_update_payload_part1(mgr);
-				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-			}
+			drm_dp_mst_reset_vcpi_slots(mgr, port);
+			drm_dp_update_payload_part1(mgr);
+			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
 		}
 
 		kref_put(&port->kref, drm_dp_free_mst_port);
 		send_hotplug = true;
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
 
 	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
 	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
 	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
 	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
 
@@ -3481,6 +3481,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 	else
 		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
 
+	panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+		dvo_timing->himage_lo;
+	panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+		dvo_timing->vimage_lo;
+
 	/* Some VBTs have bogus h/vtotal values */
 	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
 		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	}
 	if (bdb->version < 106) {
 		expected_size = 22;
-	} else if (bdb->version < 109) {
+	} else if (bdb->version < 111) {
 		expected_size = 27;
 	} else if (bdb->version < 195) {
 		BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1545,6 +1550,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
 	return false;
 }
 
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+	static const struct {
+		u16 dp, hdmi;
+	} port_mapping[] = {
+		[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+		[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+		[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+		[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+	};
+	int i;
+
+	/* FIXME maybe deal with port A as well? */
+	if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+		return false;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		const union child_device_config *p_child =
+			&dev_priv->vbt.child_dev[i];
+		if ((p_child->common.dvo_port == port_mapping[port].dp ||
+		     p_child->common.dvo_port == port_mapping[port].hdmi) &&
+		    (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+						    DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+			return true;
+	}
+
+	return false;
+}
+
 /**
  * intel_bios_is_port_edp - is the device in given port eDP
  * @dev_priv: i915 device instance
@@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
+	int i;
 	u32 val, final;
 	bool has_lvds = false;
 	bool has_cpu_edp = false;
 	bool has_panel = false;
 	bool has_ck505 = false;
 	bool can_ssc = false;
+	bool using_ssc_source = false;
 
 	/* We need to take the global config into account */
 	for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 			can_ssc = true;
 	}
 
-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
-		      has_panel, has_lvds, has_ck505);
+	/* Check if any DPLLs are using the SSC source */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		u32 temp = I915_READ(PCH_DPLL(i));
+
+		if (!(temp & DPLL_VCO_ENABLE))
+			continue;
+
+		if ((temp & PLL_REF_INPUT_MASK) ==
+		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+			using_ssc_source = true;
+			break;
+		}
+	}
+
+	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+		      has_panel, has_lvds, has_ck505, using_ssc_source);
 
 	/* Ironlake: try to setup display ref clock before DPLL
 	 * enabling. This is only under driver's control after
@@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 			final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 		} else
 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-	} else {
-		final |= DREF_SSC_SOURCE_DISABLE;
-		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+	} else if (using_ssc_source) {
+		final |= DREF_SSC_SOURCE_ENABLE;
+		final |= DREF_SSC1_ENABLE;
 	}
 
 	if (final == val)
@@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 		POSTING_READ(PCH_DREF_CONTROL);
 		udelay(200);
 	} else {
-		DRM_DEBUG_KMS("Disabling SSC entirely\n");
+		DRM_DEBUG_KMS("Disabling CPU source output\n");
 
 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
@@ -8404,6 +8420,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 		POSTING_READ(PCH_DREF_CONTROL);
 		udelay(200);
 
+		if (!using_ssc_source) {
+			DRM_DEBUG_KMS("Disabling SSC source\n");
+
 		/* Turn off the SSC source */
 		val &= ~DREF_SSC_SOURCE_MASK;
 		val |= DREF_SSC_SOURCE_DISABLE;
@@ -8415,6 +8434,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 		POSTING_READ(PCH_DREF_CONTROL);
 		udelay(200);
 	}
+	}
 
 	BUG_ON(val != final);
 }
@@ -14554,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+		bool has_edp, has_port;
+
 		/*
 		 * The DP_DETECTED bit is the latched state of the DDC
 		 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14562,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev)
 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
 		 * detect eDP ports.
+		 *
+		 * Sadly the straps seem to be missing sometimes even for HDMI
+		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+		 * and VBT for the presence of the port. Additionally we can't
+		 * trust the port type the VBT declares as we've seen at least
+		 * HDMI ports that the VBT claim are DP or eDP.
 		 */
-		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
-		    !intel_dp_is_edp(dev, PORT_B))
+		has_edp = intel_dp_is_edp(dev, PORT_B);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
-		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
-		    intel_dp_is_edp(dev, PORT_B))
-			intel_dp_init(dev, VLV_DP_B, PORT_B);
 
-		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
-		    !intel_dp_is_edp(dev, PORT_C))
+		has_edp = intel_dp_is_edp(dev, PORT_C);
+		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
-		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
-		    intel_dp_is_edp(dev, PORT_C))
-			intel_dp_init(dev, VLV_DP_C, PORT_C);
 
 		if (IS_CHERRYVIEW(dev)) {
-			/* eDP not supported on port D, so don't check VBT */
-			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
-				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
-			if (I915_READ(CHV_DP_D) & DP_DETECTED)
+			/*
+			 * eDP not supported on port D,
+			 * so no need to worry about it
+			 */
+			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
 				intel_dp_init(dev, CHV_DP_D, PORT_D);
+			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
 		}
 
 		intel_dsi_init(dev);
@@ -5725,8 +5725,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
 		fixed_mode = drm_mode_duplicate(dev,
 					dev_priv->vbt.lfp_lvds_vbt_mode);
-		if (fixed_mode)
+		if (fixed_mode) {
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			connector->display_info.width_mm = fixed_mode->width_mm;
+			connector->display_info.height_mm = fixed_mode->height_mm;
+		}
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -5923,9 +5926,9 @@ fail:
 	return false;
 }
 
-void
-intel_dp_init(struct drm_device *dev,
-	      i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+		   i915_reg_t output_reg,
+		   enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5938,7 @@ bool intel_dp_init(struct drm_device *dev,
 
 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
-		return;
+		return false;
 
 	intel_connector = intel_connector_alloc();
 	if (!intel_connector)
@@ -5992,7 +5995,7 @@ bool intel_dp_init(struct drm_device *dev,
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
 		goto err_init_connector;
 
-	return;
+	return true;
 
 err_init_connector:
 	drm_encoder_cleanup(encoder);
@@ -6000,8 +6003,7 @@ err_encoder_init:
 	kfree(intel_connector);
 err_connector_alloc:
 	kfree(intel_dig_port);
-
-	return;
+	return false;
 }
 
 void intel_dp_mst_suspend(struct drm_device *dev)
@@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 			      DPLL_ID_PCH_PLL_B);
 	}
 
+	if (!pll)
+		return NULL;
+
 	/* reference the pll */
 	intel_reference_shared_dpll(pll, crtc_state);
 
@@ -1284,7 +1284,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev)
 		goto err;
 	}
 
+	connector->display_info.width_mm = fixed_mode->width_mm;
+	connector->display_info.height_mm = fixed_mode->height_mm;
+
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
 	intel_dsi_add_properties(intel_connector);
@@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	enum port port = intel_dig_port->port;
 	uint8_t alternate_ddc_pin;
 
+	DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+		      port_name(port));
+
 	if (WARN(intel_dig_port->max_lanes < 4,
 		 "Not enough lanes (%d) for HDMI on port %c\n",
 		 intel_dig_port->max_lanes, port_name(port)))
@@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
 		fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
 		if (fixed_mode) {
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			connector->display_info.width_mm = fixed_mode->width_mm;
+			connector->display_info.height_mm = fixed_mode->height_mm;
 			goto out;
 		}
 	}
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
 	u8 vsync_off:4;
 	u8 rsvd0:6;
 	u8 hsync_off_hi:2;
-	u8 h_image;
-	u8 v_image;
-	u8 max_hv;
+	u8 himage_lo;
+	u8 vimage_lo;
+	u8 vimage_hi:4;
+	u8 himage_hi:4;
 	u8 h_border;
 	u8 v_border;
 	u8 rsvd1:3;
@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
-	.cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
 		struct pwr_rail_t *r = &stbl.rail[i];
 		struct nvkm_iccsense_rail *rail;
 		struct nvkm_iccsense_sensor *sensor;
+		int (*read)(struct nvkm_iccsense *,
+			    struct nvkm_iccsense_rail *);
 
 		if (!r->mode || r->resistor_mohm == 0)
 			continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
 		if (!sensor)
 			continue;
 
-		rail = kmalloc(sizeof(*rail), GFP_KERNEL);
-		if (!rail)
-			return -ENOMEM;
-
 		switch (sensor->type) {
 		case NVBIOS_EXTDEV_INA209:
 			if (r->rail != 0)
 				continue;
-			rail->read = nvkm_iccsense_ina209_read;
+			read = nvkm_iccsense_ina209_read;
 			break;
 		case NVBIOS_EXTDEV_INA219:
 			if (r->rail != 0)
 				continue;
-			rail->read = nvkm_iccsense_ina219_read;
+			read = nvkm_iccsense_ina219_read;
 			break;
 		case NVBIOS_EXTDEV_INA3221:
 			if (r->rail >= 3)
 				continue;
-			rail->read = nvkm_iccsense_ina3221_read;
+			read = nvkm_iccsense_ina3221_read;
 			break;
 		default:
 			continue;
 		}
 
+		rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+		if (!rail)
+			return -ENOMEM;
 		sensor->rail_mask |= 1 << r->rail;
+		rail->read = read;
 		rail->sensor = sensor;
 		rail->idx = r->rail;
 		rail->mohm = r->resistor_mohm;
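Note: the iccsense leak fix above is a plain reordering: resolve everything that can still `continue` out of the loop before allocating, so no allocation is orphaned. A self-contained sketch of the shape (hypothetical names, not the nouveau API):

#include <stdlib.h>

struct rail { int (*read)(void); };

static int read_ina209(void) { return 0; }

static struct rail *make_rail(int sensor_type)
{
	int (*read)(void);

	switch (sensor_type) {
	case 209:
		read = read_ina209;
		break;
	default:
		return NULL;	/* bail out before any allocation */
	}

	struct rail *r = malloc(sizeof(*r));	/* allocate last */
	if (!r)
		return NULL;
	r->read = read;
	return r;
}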
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
 		radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	/* use frac fb div on RS780/RS880 */
-	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+	if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+	    && !radeon_crtc->ss_enabled)
 		radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
 		radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (radeon_crtc->ss.refdiv) {
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
 			radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-			if (ASIC_IS_AVIVO(rdev))
+			if (rdev->family >= CHIP_RV770)
 				radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		}
 	}
@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 /*
  * GPU helpers function.
  */
+
+/**
+ * radeon_device_is_virtual - check if we are running is a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
+#endif
+}
+
 /**
  * radeon_card_posted - check if the hw has already been initialized
  *
@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
 	uint32_t reg;
 
+	/* for pass through, always force asic_init */
+	if (radeon_device_is_virtual())
+		return false;
+
 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
 	if (efi_enabled(EFI_BOOT) &&
 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 	radeon_agp_suspend(rdev);
 
 	pci_save_state(dev->pdev);
-	if (freeze && rdev->family >= CHIP_R600) {
+	if (freeze && rdev->family >= CHIP_CEDAR) {
 		rdev->asic->asic_reset(rdev, true);
 		pci_restore_state(dev->pdev);
 	} else if (suspend) {