Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Fix a crash on gpu reset at driver load time
- ATPX hotplug fix for when the dGPU is powered off
- PLL fix for r5xx asics
- SR-IOV fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190509230017.3566-1-alexander.deucher@amd.com
commit c01ad0fe1d
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		}
 	}
 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-		if ((adev->flags & AMD_IS_PX) &&
-		    amdgpu_atpx_dgpu_req_power_for_displays()) {
+		if (adev->flags & AMD_IS_PX) {
 			pm_runtime_get_sync(adev->ddev->dev);
 			/* Just fire off a uevent and let userspace tell us what to do */
 			drm_helper_hpd_irq_event(adev->ddev);

@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return count;
 
+	/* profile_exit setting is valid only when current mode is in profile mode */
+	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+			AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+			AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+			AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+			(level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+		pr_err("Currently not in any profile mode!\n");
+		return -EINVAL;
+	}
+
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {

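The guard added above is a pure bitmask test: profile_exit only makes sense while one of the PROFILE_* levels is active. A minimal standalone sketch of that test follows; the bit values here are illustrative assumptions, not the real enum from the driver headers.

#include <stdio.h>

/* Illustrative bit values; the real enum lives in the power headers */
enum dpm_level {
	LEVEL_AUTO             = 0x001,
	LEVEL_PROFILE_STANDARD = 0x010,
	LEVEL_PROFILE_MIN_SCLK = 0x020,
	LEVEL_PROFILE_MIN_MCLK = 0x040,
	LEVEL_PROFILE_PEAK     = 0x080,
	LEVEL_PROFILE_EXIT     = 0x100,
};

#define PROFILE_MASK (LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | \
		      LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK)

/* a profile_exit request is only valid while some profile mode is active */
static int exit_request_valid(enum dpm_level current_level)
{
	return (current_level & PROFILE_MASK) != 0;
}

int main(void)
{
	printf("%d\n", exit_request_valid(LEVEL_AUTO));         /* 0: rejected */
	printf("%d\n", exit_request_valid(LEVEL_PROFILE_PEAK)); /* 1: allowed */
	return 0;
}
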
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
 static int psp_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	psp_set_funcs(adev);
-
-	return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct psp_context *psp = &adev->psp;
-	int ret;
+
+	psp_set_funcs(adev);
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:

@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
 
 	psp->adev = adev;
 
 	return 0;
 }
 
+static int psp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
+	int ret;
+
+	ret = psp_init_microcode(psp);
+	if (ret) {
+		DRM_ERROR("Failed to load psp firmware!\n");

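The point of the two PSP hunks above is ordering: a GPU reset triggered during driver bring-up can call through the PSP function pointers before sw_init has run, so psp_set_funcs() moves to early_init. A toy model of that hazard, using made-up names rather than amdgpu's IP-block machinery:

struct ctx {
	int (*do_reset)(struct ctx *c);   /* version-specific hook */
};

static int hw_reset(struct ctx *c) { return 0; /* ... */ }

static void early_init(struct ctx *c)
{
	c->do_reset = hw_reset;   /* installed before any reset path can run */
}

/* a fault handler that may fire between early_init and sw_init */
static int on_fault(struct ctx *c)
{
	if (!c->do_reset)         /* with sw_init-time setup this could be NULL */
		return -1;
	return c->do_reset(c);
}
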
@@ -2756,6 +2756,37 @@ error_free_sched_entity:
 	return r;
 }
 
+/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD: if any subsequent PD is
+ * allocated, page tables are being created and filled, so the
+ * VM is not clean.
+ *
+ * Returns:
+ *	0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+	struct amdgpu_vm *vm)
+{
+	enum amdgpu_vm_level root = adev->vm_manager.root_level;
+	unsigned int entries = amdgpu_vm_num_entries(adev, root);
+	unsigned int i = 0;
+
+	if (!(vm->root.entries))
+		return 0;
+
+	for (i = 0; i < entries; i++) {
+		if (vm->root.entries[i].base.bo)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *

@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
 		return r;
 
 	/* Sanity checks */
-	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
-		r = -EINVAL;
+	r = amdgpu_vm_check_clean_reserved(adev, vm);
+	if (r)
 		goto unreserve_bo;
-	}
 
 	if (pasid) {
 		unsigned long flags;

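Taken together, the two VM hunks relax the sanity check in amdgpu_vm_make_compute(): previously any allocated root-entry array (or non-empty mapping tree) failed the switch, while now the VM counts as clean so long as no root PD entry has a page-table BO attached yet. A contrast of the two predicates with stand-in types (illustrative only, not the real amdgpu structs):

#include <errno.h>

struct stand_in_entry { void *bo; };

/* old behaviour: any allocated entry array meant "not clean" */
static int old_check(struct stand_in_entry *entries)
{
	return entries ? -EINVAL : 0;          /* too strict */
}

/* new behaviour: clean as long as no entry has a BO yet */
static int new_check(struct stand_in_entry *entries, unsigned int n)
{
	unsigned int i;

	if (!entries)
		return 0;
	for (i = 0; i < n; i++)
		if (entries[i].bo)
			return -EINVAL;        /* a sub-PD exists: not clean */
	return 0;
}
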
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
 		if (amdgpu_sriov_runtime(adev))
 			schedule_work(&adev->virt.flr_work);
 		break;
+	case IDH_QUERY_ALIVE:
+		xgpu_ai_mailbox_send_ack(adev);
+		break;
 	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
 	 * ignore it since the polling thread will handle it;
 	 * other msgs like flr complete are not handled here.

@@ -49,6 +49,7 @@ enum idh_event {
 	IDH_FLR_NOTIFICATION_CMPL,
 	IDH_SUCCESS,
 	IDH_FAIL,
+	IDH_QUERY_ALIVE,
 	IDH_EVENT_MAX
 };
 

@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 				0xFFFFFFFF, 0x00000004);
 			/* mc resume*/
 			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-					lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-					upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+					adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+					adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 				offset = 0;
 			} else {
 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),

@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 					upper_32_bits(adev->uvd.inst[i].gpu_addr));
 				offset = size;
+				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
 			}
 
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
-				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),

@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+			uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+			uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
-						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-						adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-						(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+						(tmr_mc_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),

@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
 						(adev->vce.gpu_addr >> 40) & 0xff);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+						offset & ~0x0f000000);
+
 		}
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
 					mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),

@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
 					mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
 					(adev->vce.gpu_addr >> 40) & 0xff);
 
-		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 		size = VCE_V4_0_FW_SIZE;
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-			offset & ~0x0f000000);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
 		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;

@@ -31,7 +31,7 @@
 #include "soc15_common.h"
 #include "vega10_ih.h"
 
-
+#define MAX_REARM_RETRY 10
 
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 

@@ -381,6 +381,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 	ih->rptr += 32;
 }
 
+/**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih)
+{
+	uint32_t reg_rptr = 0;
+	uint32_t v = 0;
+	uint32_t i = 0;
+
+	if (ih == &adev->irq.ih)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+	else if (ih == &adev->irq.ih1)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+	else if (ih == &adev->irq.ih2)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+	else
+		return;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(reg_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
 /**
  * vega10_ih_set_rptr - set the IH ring buffer rptr
 *

@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
 		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			vega10_ih_irq_rearm(adev, ih);
 	} else if (ih == &adev->irq.ih) {
 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
 	} else if (ih == &adev->irq.ih1) {

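Putting the two IH hunks together: on SR-IOV VFs the doorbell write that publishes the new read pointer can get lost, so after writing it the driver polls the rptr register and re-posts the doorbell a bounded number of times. A standalone model of that loop follows, with a toy register standing in for RREG32_NO_KIQ/WDOORBELL32:

#include <stdint.h>

#define MAX_REARM_RETRY 10

static volatile uint32_t hw_rptr;   /* stand-in for the IH_RB_RPTR register */

static uint32_t read_hw_rptr(void) { return hw_rptr; }
static void write_doorbell(uint32_t rptr) { hw_rptr = rptr; }   /* toy model */

static void rearm(uint32_t sw_rptr, uint32_t ring_size)
{
	uint32_t v;
	int i;

	for (i = 0; i < MAX_REARM_RETRY; i++) {
		v = read_hw_rptr();
		/* hardware still behind and the value is sane: assume the
		 * doorbell write was lost and post it again */
		if (v < ring_size && v != sw_rptr)
			write_doorbell(sw_rptr);
		else
			break;
	}
}
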
@@ -5242,7 +5242,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct drm_crtc *pcrtc,
 				    bool wait_for_vblank)
 {
-	uint32_t i, r;
+	uint32_t i;
 	uint64_t timestamp_ns;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;

@@ -5253,6 +5253,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *dm_old_crtc_state =
 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
 	int planes_count = 0, vpos, hpos;
+	long r;
 	unsigned long flags;
 	struct amdgpu_bo *abo;
 	uint64_t tiling_flags;

@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
-	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*ref_div = min(max(den/post_div, 1u), ref_div_max);
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
-		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*ref_div = (*ref_div * fb_div_max)/(*fb_div);
 		*fb_div = fb_div_max;
 	}
 }

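The radeon hunk swaps DIV_ROUND_CLOSEST for plain truncating division, i.e. it now prefers the lower reference divider when the ratio is inexact, which is the r5xx PLL fix named in the merge summary. A worked example with illustrative operands (not taken from a real video mode), using the unsigned-only form of the kernel macro:

#include <stdio.h>

/* unsigned-only variant of the kernel's DIV_ROUND_CLOSEST */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int den = 10, post_div = 4;

	/* old: rounds 10/4 = 2.5 up to 3 */
	printf("rounded ref_div:   %u\n", DIV_ROUND_CLOSEST(den, post_div));
	/* new: truncates 10/4 to 2, the lower reference divider */
	printf("truncated ref_div: %u\n", den / post_div);
	return 0;
}

With den = 10 and post_div = 4, the old code rounds 2.5 up and picks ref_div = 3, while the new code truncates to 2; the feedback divider is then recomputed against whichever reference divider was chosen.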