Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next

Add a new list.h helper for doing bulk updates. Used by ttm.

- Fixes for display underflow on VI APUs at 4K with UVD running
- Endian fixes for powerplay on vega
- DC fixes for interlaced video
- Vega20 powerplay fixes
- RV/RV2/PCO powerplay fixes
- Fix for spurious ACPI events on HG laptops
- Fix a memory leak in DC on driver unload
- Fixes for manual fan control mode switching
- Suspend/resume robustness fixes
- Fix display handling on RV2
- VCN fixes for DPG on PCO
- Misc code cleanups and warning fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181011014739.3117-1-alexander.deucher@amd.com

commit ca4b869240
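
The list.h helper referred to above is list_bulk_move_tail(). A minimal usage
sketch, assuming the 4.20 helper signature; every name here other than the
helper itself is illustrative:

	#include <linux/list.h>

	struct lru_entry {
		struct list_head lru;	/* linkage on an LRU list */
	};

	/* Re-queue an already-linked sublist [first, last] to the tail of
	 * @lru_list in O(1), instead of one list_move_tail() per element. */
	static void demo_bulk_requeue(struct list_head *lru_list,
				      struct lru_entry *first,
				      struct lru_entry *last)
	{
		list_bulk_move_tail(lru_list, &first->lru, &last->lru);
	}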
@@ -81,6 +81,23 @@
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"
 
+#define MAX_GPU_INSTANCE		16
+
+struct amdgpu_gpu_instance
+{
+	struct amdgpu_device		*adev;
+	int				mgpu_fan_enabled;
+};
+
+struct amdgpu_mgpu_info
+{
+	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
+	struct mutex			mutex;
+	uint32_t			num_gpu;
+	uint32_t			num_dgpu;
+	uint32_t			num_apu;
+};
+
 /*
  * Modules parameters.
  */
@@ -134,6 +151,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern struct amdgpu_mgpu_info mgpu_info;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -598,31 +616,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
  */
 void amdgpu_test_moves(struct amdgpu_device *adev);
 
-
-/*
- * amdgpu smumgr functions
- */
-struct amdgpu_smumgr_funcs {
-	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
-	int (*request_smu_load_fw)(struct amdgpu_device *adev);
-	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
-};
-
-/*
- * amdgpu smumgr
- */
-struct amdgpu_smumgr {
-	struct amdgpu_bo *toc_buf;
-	struct amdgpu_bo *smu_buf;
-	/* asic priv smu data */
-	void *priv;
-	spinlock_t smu_lock;
-	/* smumgr functions */
-	const struct amdgpu_smumgr_funcs *smumgr_funcs;
-	/* ucode loading complete flag */
-	uint32_t fw_flags;
-};
-
 /*
  * ASIC specific register table accessible by UMD
  */
@@ -958,9 +951,6 @@ struct amdgpu_device {
 	u32				cg_flags;
 	u32				pg_flags;
 
-	/* amdgpu smumgr */
-	struct amdgpu_smumgr smu;
-
 	/* gfx */
 	struct amdgpu_gfx		gfx;
 
@@ -1025,6 +1015,9 @@ struct amdgpu_device {
 	bool				has_hw_reset;
 	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
 
+	/* s3/s4 mask */
+	bool				in_suspend;
+
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
 	bool                            in_gpu_reset;
@@ -359,7 +359,9 @@ out:
  *
  * Checks the acpi event and if it matches an atif event,
  * handles it.
- * Returns NOTIFY code
+ *
+ * Returns:
+ * NOTIFY_BAD or NOTIFY_DONE, depending on the event.
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			       struct acpi_bus_event *event)
@@ -373,11 +375,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
 		return NOTIFY_DONE;
 
+	/* Is this actually our event? */
 	if (!atif ||
 	    !atif->notification_cfg.enabled ||
-	    event->type != atif->notification_cfg.command_code)
-		/* Not our event */
-		return NOTIFY_DONE;
+	    event->type != atif->notification_cfg.command_code) {
+		/* These events will generate keypresses otherwise */
+		if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
+			return NOTIFY_BAD;
+		else
+			return NOTIFY_DONE;
+	}
 
 	if (atif->functions.sbios_requests) {
 		struct atif_sbios_requests req;
@@ -386,7 +393,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		count = amdgpu_atif_get_sbios_requests(atif, &req);
 
 		if (count <= 0)
-			return NOTIFY_DONE;
+			return NOTIFY_BAD;
 
 		DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
@@ -1525,6 +1525,92 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.sw)
+			continue;
+		if (adev->ip_blocks[i].status.hw)
+			continue;
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+			if (r) {
+				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+			adev->ip_blocks[i].status.hw = true;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.sw)
+			continue;
+		if (adev->ip_blocks[i].status.hw)
+			continue;
+		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+		if (r) {
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
+			return r;
+		}
+		adev->ip_blocks[i].status.hw = true;
+	}
+
+	return 0;
+}
+
+static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
+{
+	int r = 0;
+	int i;
+
+	if (adev->asic_type >= CHIP_VEGA10) {
+		for (i = 0; i < adev->num_ip_blocks; i++) {
+			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+				if (adev->in_gpu_reset || adev->in_suspend) {
+					if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
+						break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
+					r = adev->ip_blocks[i].version->funcs->resume(adev);
+					if (r) {
+						DRM_ERROR("resume of IP block <%s> failed %d\n",
+							  adev->ip_blocks[i].version->funcs->name, r);
+						return r;
+					}
+				} else {
+					r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+					if (r) {
+						DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+							  adev->ip_blocks[i].version->funcs->name, r);
+						return r;
+					}
+				}
+				adev->ip_blocks[i].status.hw = true;
+			}
+		}
+	}
+
+	if (adev->powerplay.pp_funcs->load_firmware) {
+		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+		if (r) {
+			pr_err("firmware loading failed\n");
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_device_ip_init - run init for hardware IPs
  *
@@ -1581,19 +1667,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		}
 	}
 
-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.sw)
-			continue;
-		if (adev->ip_blocks[i].status.hw)
-			continue;
-		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
-		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-			return r;
-		}
-		adev->ip_blocks[i].status.hw = true;
-	}
+	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
+	if (r)
+		return r;
+
+	r = amdgpu_device_ip_hw_init_phase1(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_device_ip_hw_init_phase2(adev);
+	if (r)
+		return r;
+
 	amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
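
Condensed, the restructured amdgpu_device_ip_init() above boils down to the
following order. A simplified sketch of the hunks, not verbatim driver code:

	static int ip_init_tail_sketch(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_ucode_create_bo(adev);		/* ucode BO once sw_init is done */
		if (r)
			return r;
		r = amdgpu_device_ip_hw_init_phase1(adev);	/* COMMON + IH blocks first */
		if (r)
			return r;
		r = amdgpu_device_fw_loading(adev);		/* PSP resume/hw_init or SMU load */
		if (r)
			return r;
		return amdgpu_device_ip_hw_init_phase2(adev);	/* everything else */
	}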
@@ -1656,7 +1744,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 
 	for (j = 0; j < adev->num_ip_blocks; j++) {
 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1686,7 +1774,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
 
 	for (j = 0; j < adev->num_ip_blocks; j++) {
 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1723,7 +1811,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 	int i = 0, r;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
 		if (adev->ip_blocks[i].version->funcs->late_init) {
 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
@@ -1732,8 +1820,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			adev->ip_blocks[i].status.late_initialized = true;
 		}
+		adev->ip_blocks[i].status.late_initialized = true;
 	}
 
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -1803,6 +1891,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			continue;
 
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			amdgpu_ucode_free_bo(adev);
 			amdgpu_free_static_csa(adev);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
@@ -1833,6 +1922,43 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+	struct amdgpu_gpu_instance *gpu_ins;
+	struct amdgpu_device *adev;
+	int i, ret = 0;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	/*
+	 * MGPU fan boost feature should be enabled
+	 * only when there are two or more dGPUs in
+	 * the system
+	 */
+	if (mgpu_info.num_dgpu < 2)
+		goto out;
+
+	for (i = 0; i < mgpu_info.num_dgpu; i++) {
+		gpu_ins = &(mgpu_info.gpu_ins[i]);
+		adev = gpu_ins->adev;
+		if (!(adev->flags & AMD_IS_APU) &&
+		    !gpu_ins->mgpu_fan_enabled &&
+		    adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+			if (ret)
+				break;
+
+			gpu_ins->mgpu_fan_enabled = 1;
+		}
+	}
+
+out:
+	mutex_unlock(&mgpu_info.mutex);
+
+	return ret;
+}
+
 /**
  * amdgpu_device_ip_late_init_func_handler - work handler for ib test
  *
@@ -1847,6 +1973,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	r = amdgpu_device_enable_mgpu_fan_boost();
+	if (r)
+		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
 }
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2082,7 +2212,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
 			continue;
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
@@ -2114,6 +2245,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 	r = amdgpu_device_ip_resume_phase1(adev);
 	if (r)
 		return r;
+
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_ip_resume_phase2(adev);
 
 	return r;
@@ -2608,6 +2744,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
+	adev->in_suspend = true;
 	drm_kms_helper_poll_disable(dev);
 
 	if (fbcon)
@@ -2793,6 +2930,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth--;
 #endif
+	adev->in_suspend = false;
+
 	return 0;
 }
 
@@ -3061,6 +3200,10 @@ retry:
 		if (r)
 			goto out;
 
+		r = amdgpu_device_fw_loading(adev);
+		if (r)
+			return r;
+
 		r = amdgpu_device_ip_resume_phase2(adev);
 		if (r)
 			goto out;
@@ -3117,6 +3260,10 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
 	if (r)
@@ -278,6 +278,9 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
 		((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
 
+#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
+		((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
+
 #define amdgpu_dpm_get_sclk(adev, l) \
 		((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
 
@@ -357,6 +360,10 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
 			(adev)->powerplay.pp_handle, type, parameter, size))
 
+#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
+		((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
+			(adev)->powerplay.pp_handle))
+
 struct amdgpu_dpm {
 	struct amdgpu_ps *ps;
 	/* number of valid power states */
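
Like the neighboring wrappers, the new macro dereferences pp_funcs without any
checking, so callers are expected to NULL-check first — the pattern
amdgpu_device_enable_mgpu_fan_boost() uses earlier in this diff. A caller
sketch; the error-code choice is illustrative:

	static int try_enable_fan_boost(struct amdgpu_device *adev)
	{
		/* guard before invoking the macro; it does no checking itself */
		if (!adev->powerplay.pp_funcs ||
		    !adev->powerplay.pp_funcs->enable_mgpu_fan_boost)
			return -EOPNOTSUPP;	/* illustrative */

		return amdgpu_dpm_enable_mgpu_fan_boost(adev);
	}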
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+struct amdgpu_mgpu_info mgpu_info = {
+	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+};
 
 /**
  * DOC: vramlimit (int)
@@ -297,8 +297,7 @@ struct amdgpu_gfx {
 	/* reset mask */
 	uint32_t                        grbm_soft_reset;
 	uint32_t                        srbm_soft_reset;
-	/* s3/s4 mask */
-	bool                            in_suspend;
 
 	/* NGG */
 	struct amdgpu_ngg		ngg;
 
@@ -146,6 +146,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 {
 	const uint64_t four_gb = 0x100000000ULL;
 	u64 size_af, size_bf;
+	/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
+	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
 
 	mc->gart_size += adev->pm.smu_prv_buffer_size;
 
@@ -153,7 +155,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	 * the GART base on a 4GB boundary as well.
 	 */
 	size_bf = mc->fb_start;
-	size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
+	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
 
 	if (mc->gart_size > max(size_bf, size_af)) {
 		dev_warn(adev->dev, "limiting GART\n");
@@ -164,7 +166,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	    (size_af < mc->gart_size))
 		mc->gart_start = 0;
 	else
-		mc->gart_start = mc->mc_mask - mc->gart_size + 1;
+		mc->gart_start = max_mc_address - mc->gart_size + 1;
 
 	mc->gart_start &= ~(four_gb - 1);
 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
@@ -200,16 +202,13 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	}
 
 	if (size_bf > size_af) {
-		mc->agp_start = mc->fb_start > mc->gart_start ?
-			mc->gart_end + 1 : 0;
+		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
 		mc->agp_size = size_bf;
 	} else {
-		mc->agp_start = (mc->fb_start > mc->gart_start ?
-			mc->fb_end : mc->gart_end) + 1,
+		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
 		mc->agp_size = size_af;
 	}
 
-	mc->agp_start = ALIGN(mc->agp_start, sixteen_gb);
 	mc->agp_end = mc->agp_start + mc->agp_size - 1;
 	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
 		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
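
A toy walk-through of the new placement math, as standalone userspace C with
made-up values; the constants below are hypothetical stand-ins, not taken from
the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		const uint64_t four_gb = 0x100000000ULL;
		uint64_t mc_mask    = 0xFFFFFFFFFFFFULL;   /* hypothetical 48-bit mask */
		uint64_t hole_start = 0x800000000000ULL;   /* hypothetical hole start */
		uint64_t fb_end     = 0x2FFFFFFFFULL;      /* e.g. 12 GB of VRAM - 1 */
		uint64_t gart_size  = 512ULL << 20;

		/* clamp the top of the managed range below the hole ... */
		uint64_t max_mc = mc_mask < hole_start - 1 ? mc_mask : hole_start - 1;
		/* ... so the space after the FB and the GART base stay legal */
		uint64_t size_af = max_mc + 1 - ALIGN_UP(fb_end + 1, four_gb);
		uint64_t gart_start = (max_mc - gart_size + 1) & ~(four_gb - 1);

		printf("space after FB: %llu GiB, GART base: 0x%llx\n",
		       (unsigned long long)(size_af >> 30),
		       (unsigned long long)gart_start);
		return 0;
	}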
@@ -354,6 +354,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		if (!ring || !ring->ready)
 			continue;
 
+		/* skip IB tests for KIQ in general for the below reasons:
+		 * 1. We never submit IBs to the KIQ
+		 * 2. KIQ doesn't use the EOP interrupts,
+		 *    we use some other CP interrupt.
+		 */
+		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			continue;
+
 		/* MM engine need more time */
 		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
 		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
@@ -40,6 +40,30 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"
 
+static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+	int i;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	for (i = 0; i < mgpu_info.num_gpu; i++) {
+		gpu_instance = &(mgpu_info.gpu_ins[i]);
+		if (gpu_instance->adev == adev) {
+			mgpu_info.gpu_ins[i] =
+				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
+			mgpu_info.num_gpu--;
+			if (adev->flags & AMD_IS_APU)
+				mgpu_info.num_apu--;
+			else
+				mgpu_info.num_dgpu--;
+			break;
+		}
+	}
+
+	mutex_unlock(&mgpu_info.mutex);
+}
+
 /**
  * amdgpu_driver_unload_kms - Main unload function for KMS.
  *
@@ -55,6 +79,8 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev == NULL)
 		return;
 
+	amdgpu_unregister_gpu_instance(adev);
+
 	if (adev->rmmio == NULL)
 		goto done_free;
 
@@ -75,6 +101,31 @@ done_free:
 	dev->dev_private = NULL;
 }
 
+static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
+		DRM_ERROR("Cannot register more gpu instance\n");
+		mutex_unlock(&mgpu_info.mutex);
+		return;
+	}
+
+	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
+	gpu_instance->adev = adev;
+	gpu_instance->mgpu_fan_enabled = 0;
+
+	mgpu_info.num_gpu++;
+	if (adev->flags & AMD_IS_APU)
+		mgpu_info.num_apu++;
+	else
+		mgpu_info.num_dgpu++;
+
+	mutex_unlock(&mgpu_info.mutex);
+}
+
 /**
  * amdgpu_driver_load_kms - Main load function for KMS.
  *
@@ -169,6 +220,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
+	amdgpu_register_gpu_instance(adev);
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -1120,12 +1120,19 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
 	u32 value;
+	u32 pwm_mode;
 
 	/* Can't adjust fan when the card is off */
 	if  ((adev->flags & AMD_IS_PX) &&
 	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+		pr_info("manual fan speed control should be enabled first\n");
+		return -EINVAL;
+	}
+
 	err = kstrtou32(buf, 10, &value);
 	if (err)
 		return err;
@@ -1187,6 +1194,148 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	return sprintf(buf, "%i\n", speed);
 }
 
+static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 min_rpm = 0;
+	u32 size = sizeof(min_rpm);
+	int r;
+
+	if (!adev->powerplay.pp_funcs->read_sensor)
+		return -EINVAL;
+
+	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+				   (void *)&min_rpm, &size);
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 max_rpm = 0;
+	u32 size = sizeof(max_rpm);
+	int r;
+
+	if (!adev->powerplay.pp_funcs->read_sensor)
+		return -EINVAL;
+
+	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+				   (void *)&max_rpm, &size);
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 rpm = 0;
+
+	/* Can't adjust fan when the card is off */
+	if  ((adev->flags & AMD_IS_PX) &&
+	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
+		if (err)
+			return err;
+	}
+
+	return sprintf(buf, "%i\n", rpm);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 value;
+	u32 pwm_mode;
+
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+		return -ENODATA;
+
+	/* Can't adjust fan when the card is off */
+	if  ((adev->flags & AMD_IS_PX) &&
+	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	err = kstrtou32(buf, 10, &value);
+	if (err)
+		return err;
+
+	if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+		if (err)
+			return err;
+	}
+
+	return count;
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 pwm_mode = 0;
+
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+		return -EINVAL;
+
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
+	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	int value;
+	u32 pwm_mode;
+
+	/* Can't adjust fan when the card is off */
+	if  ((adev->flags & AMD_IS_PX) &&
+	     (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+		return -EINVAL;
+
+	err = kstrtoint(buf, 10, &value);
+	if (err)
+		return err;
+
+	if (value == 0)
+		pwm_mode = AMD_FAN_CTRL_AUTO;
+	else if (value == 1)
+		pwm_mode = AMD_FAN_CTRL_MANUAL;
+	else
+		return -EINVAL;
+
+	amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+
+	return count;
+}
+
 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
@@ -1406,8 +1555,16 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
  *
  * - pwm1_max: pulse width modulation fan control maximum level (255)
  *
+ * - fan1_min: a minimum value. Unit: revolution/min (RPM)
+ *
+ * - fan1_max: a maximum value. Unit: revolution/min (RPM)
+ *
  * - fan1_input: fan speed in RPM
  *
+ * - fan[1-*]_target: Desired fan speed. Unit: revolution/min (RPM)
+ *
+ * - fan[1-*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
+ *
  * You can use hwmon tools like sensors to view this information on your system.
  *
  */
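
The new attributes surface through the standard hwmon sysfs interface. A
hedged userspace sketch — the hwmon index below is hypothetical; in practice
match the device's "name" attribute against "amdgpu" first:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
		int rpm;

		if (!f) {
			perror("fan1_input");
			return 1;
		}
		if (fscanf(f, "%d", &rpm) == 1)
			printf("fan speed: %d RPM\n", rpm);
		fclose(f);
		return 0;
	}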
@@ -1420,6 +1577,10 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
+static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
@@ -1438,6 +1599,10 @@ static struct attribute *hwmon_attributes[] = {
 	&sensor_dev_attr_pwm1_min.dev_attr.attr,
 	&sensor_dev_attr_pwm1_max.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_max.dev_attr.attr,
+	&sensor_dev_attr_fan1_target.dev_attr.attr,
+	&sensor_dev_attr_fan1_enable.dev_attr.attr,
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_label.dev_attr.attr,
 	&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -1456,13 +1621,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;
 
-
 	/* Skip fan attributes if fan is not present */
 	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
-	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
+	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;
 
 	/* Skip limit attributes if DPM is not enabled */
@@ -1472,7 +1640,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	    attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;
 
 	/* mask fan attributes if we have no bindings for this asic to expose */
@@ -1497,10 +1670,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	/* hide max/min values if we can't both query and manage the fan */
 	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
 	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+	    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;
 
+	if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+		return 0;
+
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU) &&
 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
@@ -452,8 +452,6 @@ static int psp_hw_fini(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
-	amdgpu_ucode_fini_bo(adev);
-
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -46,10 +46,6 @@ struct amdgpu_sdma_instance {
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-#ifdef CONFIG_DRM_AMDGPU_SI
-	//SI DMA has a difference trap irq number for the second engine
-	struct amdgpu_irq_src trap_irq_1;
-#endif
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
@@ -297,10 +297,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_VEGAM:
-		if (!load_type)
-			return AMDGPU_FW_LOAD_DIRECT;
-		else
-			return AMDGPU_FW_LOAD_SMU;
+		return AMDGPU_FW_LOAD_SMU;
 	case CHIP_VEGA10:
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
@@ -423,32 +420,41 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
 	return 0;
 }
 
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
+{
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+			&adev->firmware.fw_buf,
+			&adev->firmware.fw_buf_mc,
+			&adev->firmware.fw_buf_ptr);
+		if (!adev->firmware.fw_buf) {
+			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
+			return -ENOMEM;
+		} else if (amdgpu_sriov_vf(adev)) {
+			memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
+		}
+	}
+	return 0;
+}
+
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
+{
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
+		amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+		&adev->firmware.fw_buf_mc,
+		&adev->firmware.fw_buf_ptr);
+}
+
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
 	uint64_t fw_offset = 0;
-	int i, err;
+	int i;
 	struct amdgpu_firmware_info *ucode = NULL;
 	const struct common_firmware_header *header = NULL;
 
-	if (!adev->firmware.fw_size) {
-		dev_warn(adev->dev, "No ip firmware need to load\n");
+	/* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
+	if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
 		return 0;
-	}
-
-	if (!adev->in_gpu_reset) {
-		err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
-			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-			&adev->firmware.fw_buf,
-			&adev->firmware.fw_buf_mc,
-			&adev->firmware.fw_buf_ptr);
-		if (err) {
-			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
-			goto failed;
-		}
-	}
-
-	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
-
 	/*
 	 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
 	 * ucode info here
@@ -465,7 +471,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
 		ucode = &adev->firmware.ucode[i];
 		if (ucode->fw) {
 			header = (const struct common_firmware_header *)ucode->fw->data;
 			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
 						    adev->firmware.fw_buf_ptr + fw_offset);
 			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -480,33 +485,4 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		}
 	}
 	return 0;
-
-failed:
-	if (err)
-		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
-
-	return err;
-}
-
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
-{
-	int i;
-	struct amdgpu_firmware_info *ucode = NULL;
-
-	if (!adev->firmware.fw_size)
-		return 0;
-
-	for (i = 0; i < adev->firmware.max_ucodes; i++) {
-		ucode = &adev->firmware.ucode[i];
-		if (ucode->fw) {
-			ucode->mc_addr = 0;
-			ucode->kaddr = NULL;
-		}
-	}
-
-	amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
-		&adev->firmware.fw_buf_mc,
-		&adev->firmware.fw_buf_ptr);
-
-	return 0;
 }
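
Net effect of the ucode hunks: allocation is split out of
amdgpu_ucode_init_bo() and the old fini path is gone. A condensed sketch of
the resulting lifecycle, following the call sites added earlier in this diff
(not verbatim driver code):

	static int fw_bo_lifecycle_sketch(struct amdgpu_device *adev)
	{
		int r;

		r = amdgpu_ucode_create_bo(adev);   /* once, after sw_init completes */
		if (r)
			return r;

		r = amdgpu_ucode_init_bo(adev);     /* fill on init/resume; skipped on
						     * baremetal reset/suspend */
		if (r)
			amdgpu_ucode_free_bo(adev); /* normally freed in ip_fini with GMC */
		return r;
	}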
@@ -276,8 +276,10 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
 int amdgpu_ucode_validate(const struct firmware *fw);
 bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
 				uint16_t hdr_major, uint16_t hdr_minor);
+
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
 
 enum amdgpu_firmware_load_type
 amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
@@ -121,8 +121,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			version_major, version_minor, family_id);
 	}
 
-	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
-		  + AMDGPU_VCN_SESSION_SIZE * 40;
+	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
@@ -263,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
 
 			ring = &adev->vcn.ring_dec;
 			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
-				     lower_32_bits(ring->wptr) | 0x80000000);
+				     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
 			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 					   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -309,18 +308,21 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
 			/* Restore */
 			ring = &adev->vcn.ring_jpeg;
 			WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
-			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000001L | 0x00000002L);
+			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+				     UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+				     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 			WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
-					lower_32_bits(ring->gpu_addr));
+				     lower_32_bits(ring->gpu_addr));
 			WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
-					upper_32_bits(ring->gpu_addr));
+				     upper_32_bits(ring->gpu_addr));
 			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
 			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
-			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+			WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
+				     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 
 			ring = &adev->vcn.ring_dec;
 			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
-				     lower_32_bits(ring->wptr) | 0x80000000);
+				     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
 			SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 					   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -24,9 +24,9 @@
 #ifndef __AMDGPU_VCN_H__
 #define __AMDGPU_VCN_H__
 
-#define AMDGPU_VCN_STACK_SIZE		(200*1024)
-#define AMDGPU_VCN_HEAP_SIZE		(256*1024)
-#define AMDGPU_VCN_SESSION_SIZE	(50*1024)
+#define AMDGPU_VCN_STACK_SIZE		(128*1024)
+#define AMDGPU_VCN_CONTEXT_SIZE		(512*1024)
 
 #define AMDGPU_VCN_FIRMWARE_OFFSET	256
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
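
Worked out, the resizing above is substantial: the old layout reserved 200K
stack + 256K heap + 40 x 50K sessions = 2456 KiB per VCN BO, while the new one
reserves 128K + 512K = 640 KiB (plus the page-aligned firmware image when it
is not PSP-loaded). The same arithmetic as a sketch:

	/* the 40x multiplier comes from the removed amdgpu_vcn_sw_init() line above */
	#define OLD_VCN_BO_KIB	(200 + 256 + 50 * 40)	/* = 2456 KiB */
	#define NEW_VCN_BO_KIB	(128 + 512)		/* =  640 KiB */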
@@ -2002,6 +2002,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		if (amdgpu_dpm == -1)
 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		else
@@ -2014,8 +2016,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2023,6 +2023,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		if (amdgpu_dpm == -1)
 			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		else
@@ -2035,8 +2037,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2044,6 +2044,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2053,8 +2055,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2063,6 +2064,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2072,8 +2075,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 		}
 	}
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
-		info->fw = adev->gfx.pfp_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+	info->fw = adev->gfx.pfp_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
-		info->fw = adev->gfx.me_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+	info->fw = adev->gfx.me_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
-		info->fw = adev->gfx.ce_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+	info->fw = adev->gfx.ce_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
-		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
-		info->fw = adev->gfx.rlc_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+	info->fw = adev->gfx.rlc_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
-		info->fw = adev->gfx.mec_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+	info->fw = adev->gfx.mec_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		/* we need account JT in */
-		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+	/* we need account JT in */
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
 
-		if (amdgpu_sriov_vf(adev)) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
-			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
-			info->fw = adev->gfx.mec_fw;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
-		}
+	if (amdgpu_sriov_vf(adev)) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+		info->fw = adev->gfx.mec_fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+	}
 
-		if (adev->gfx.mec2_fw) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
-			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
-			info->fw = adev->gfx.mec2_fw;
-			header = (const struct common_firmware_header *)info->fw->data;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-		}
-
-	}
+	if (adev->gfx.mec2_fw) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+		info->fw = adev->gfx.mec2_fw;
+		header = (const struct common_firmware_header *)info->fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	}
 
 out:
@@ -2048,11 +2045,6 @@ static int gfx_v8_0_sw_init(void *handle)
 	adev->gfx.mec.num_pipe_per_mec = 4;
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
-	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
-	if (r)
-		return r;
-
 	/* EOP Event */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
 	if (r)
@@ -4181,45 +4173,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 	udelay(50);
 }
 
-static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
-{
-	const struct rlc_firmware_header_v2_0 *hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.rlc_fw)
-		return -EINVAL;
-
-	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-	amdgpu_ucode_print_rlc_hdr(&hdr->header);
-
-	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
-			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-
-	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
-
-	return 0;
-}
-
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
-	int r;
-
 	gfx_v8_0_rlc_stop(adev);
 	gfx_v8_0_rlc_reset(adev);
 	gfx_v8_0_init_pg(adev);
-
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		/* legacy rlc firmware loading */
-		r = gfx_v8_0_rlc_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
 	gfx_v8_0_rlc_start(adev);
 
 	return 0;
@@ -4245,63 +4203,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	udelay(50);
 }
 
-static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
-{
-	const struct gfx_firmware_header_v1_0 *pfp_hdr;
-	const struct gfx_firmware_header_v1_0 *ce_hdr;
-	const struct gfx_firmware_header_v1_0 *me_hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
-		return -EINVAL;
-
-	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.pfp_fw->data;
-	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.ce_fw->data;
-	me_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.me_fw->data;
-
-	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
-	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
-	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
-
-	gfx_v8_0_cp_gfx_enable(adev, false);
-
-	/* PFP */
-	fw_data = (const __le32 *)
-		(adev->gfx.pfp_fw->data +
-		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_PFP_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
-
-	/* CE */
-	fw_data = (const __le32 *)
-		(adev->gfx.ce_fw->data +
-		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_CE_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
-
-	/* ME */
-	fw_data = (const __le32 *)
-		(adev->gfx.me_fw->data +
-		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_ME_RAM_WADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
-
-	return 0;
-}
-
 static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
 {
 	u32 count = 0;
@@ -4501,52 +4402,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	udelay(50);
 }
 
-static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
-{
-	const struct gfx_firmware_header_v1_0 *mec_hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.mec_fw)
-		return -EINVAL;
-
-	gfx_v8_0_cp_compute_enable(adev, false);
-
-	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
-
-	fw_data = (const __le32 *)
-		(adev->gfx.mec_fw->data +
-		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
-
-	/* MEC1 */
-	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
-	WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
-
-	/* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
-	if (adev->gfx.mec2_fw) {
-		const struct gfx_firmware_header_v1_0 *mec2_hdr;
-
-		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
-
-		fw_data = (const __le32 *)
-			(adev->gfx.mec2_fw->data +
-			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
-		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
-
-		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
-		for (i = 0; i < fw_size; i++)
-			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
-		WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
-	}
-
-	return 0;
-}
-
 /* KIQ functions */
 static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
 {
@@ -4872,7 +4727,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4980,17 +4835,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
 	if (!(adev->flags & AMD_IS_APU))
 		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		/* legacy firmware loading */
-		r = gfx_v8_0_cp_gfx_load_microcode(adev);
-		if (r)
-			return r;
-
-		r = gfx_v8_0_cp_compute_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
 	r = gfx_v8_0_kiq_resume(adev);
 	if (r)
 		return r;
@@ -5142,19 +4986,12 @@ static int gfx_v8_0_hw_fini(void *handle)
 
 static int gfx_v8_0_suspend(void *handle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	adev->gfx.in_suspend = true;
-	return gfx_v8_0_hw_fini(adev);
+	return gfx_v8_0_hw_fini(handle);
 }
 
 static int gfx_v8_0_resume(void *handle)
 {
-	int r;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	r = gfx_v8_0_hw_init(adev);
-	adev->gfx.in_suspend = false;
-	return r;
+	return gfx_v8_0_hw_init(handle);
 }
 
 static bool gfx_v8_0_check_soft_reset(void *handle)
@@ -7025,52 +6862,6 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
-					    struct amdgpu_irq_src *src,
-					    unsigned int type,
-					    enum amdgpu_interrupt_state state)
-{
-	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
-	switch (type) {
-	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
-		WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
-			     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
-		if (ring->me == 1)
-			WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
-					    ring->pipe,
-					    GENERIC2_INT_ENABLE,
-					    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
-		else
-			WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
-					    ring->pipe,
-					    GENERIC2_INT_ENABLE,
-					    state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
-		break;
-	default:
-		BUG(); /* kiq only support GENERIC2_INT now */
-		break;
-	}
-	return 0;
-}
-
-static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev,
-			    struct amdgpu_irq_src *source,
-			    struct amdgpu_iv_entry *entry)
-{
-	u8 me_id, pipe_id, queue_id;
-	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
-	me_id = (entry->ring_id & 0x0c) >> 2;
-	pipe_id = (entry->ring_id & 0x03) >> 0;
-	queue_id = (entry->ring_id & 0x70) >> 4;
-	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
-		  me_id, pipe_id, queue_id);
-
-	amdgpu_fence_process(ring);
-	return 0;
-}
-
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
@ -7221,11 +7012,6 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
|
|||
.process = gfx_v8_0_priv_inst_irq,
|
||||
};
|
||||
|
||||
static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
|
||||
.set = gfx_v8_0_kiq_set_interrupt_state,
|
||||
.process = gfx_v8_0_kiq_irq,
|
||||
};
|
||||
|
||||
static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
|
||||
.set = gfx_v8_0_set_cp_ecc_int_state,
|
||||
.process = gfx_v8_0_cp_ecc_error_irq,
|
||||
|
@ -7247,9 +7033,6 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
|
|||
adev->gfx.priv_inst_irq.num_types = 1;
|
||||
adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
|
||||
|
||||
adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
|
||||
adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
|
||||
|
||||
adev->gfx.cp_ecc_error_irq.num_types = 1;
|
||||
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
|
||||
|
||||
|
|
|
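Note on the suspend/resume hunks above: the per-IP adev->gfx.in_suspend bookkeeping disappears because the flag now lives on the device itself and is set once by common code, so each IP's suspend/resume collapses to its hw_fini/hw_init. A minimal sketch of the resulting shape (the wrapper names here are illustrative, not the driver's):

/* illustrative only: with a device-wide in_suspend flag, an IP block's
 * suspend callback no longer needs local state and simply forwards the
 * opaque handle to its hw_fini implementation. */
static int example_ip_suspend(void *handle)
{
	return example_ip_hw_fini(handle);	/* hypothetical hw_fini */
}
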
@@ -133,7 +133,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -173,7 +176,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -247,7 +253,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -908,6 +917,50 @@ static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
	buffer[count++] = cpu_to_le32(0);
}

+static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
+{
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+	uint32_t pg_always_on_cu_num = 2;
+	uint32_t always_on_cu_num;
+	uint32_t i, j, k;
+	uint32_t mask, cu_bitmap, counter;
+
+	if (adev->flags & AMD_IS_APU)
+		always_on_cu_num = 4;
+	else if (adev->asic_type == CHIP_VEGA12)
+		always_on_cu_num = 8;
+	else
+		always_on_cu_num = 12;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			cu_bitmap = 0;
+			counter = 0;
+			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
+
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+				if (cu_info->bitmap[i][j] & mask) {
+					if (counter == pg_always_on_cu_num)
+						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
+					if (counter < always_on_cu_num)
+						cu_bitmap |= mask;
+					else
+						break;
+					counter++;
+				}
+				mask <<= 1;
+			}
+
+			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
+			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
+		}
+	}
+	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;
@@ -941,8 +994,10 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

-	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
-	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);
+	/*
+	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
+	 * programmed in gfx_v9_0_init_always_on_cu_mask()
+	 */

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
@@ -951,6 +1006,57 @@
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
+
+	gfx_v9_0_init_always_on_cu_mask(adev);
}

+static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
+{
+	uint32_t data;
+
+	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
+	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
+	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
+	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
+	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
+
+	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
+	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
+
+	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
+	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
+	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
+
+	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
+	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
+	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
+	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
+	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
+
+	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
+	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
+	data &= 0x0000FFFF;
+	data |= 0x00C00000;
+	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
+
+	/*
+	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
+	 * programmed in gfx_v9_0_init_always_on_cu_mask()
+	 */
+
+	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
+	 * but used for RLC_LB_CNTL configuration */
+	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
+	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
+	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
+	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	gfx_v9_0_init_always_on_cu_mask(adev);
+}
+
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
@@ -1084,8 +1190,17 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		gfx_v9_0_init_lbpw(adev);
+		break;
+	case CHIP_VEGA20:
+		gfx_v9_4_init_lbpw(adev);
+		break;
+	default:
+		break;
+	}

	return 0;
@@ -1605,11 +1720,6 @@ static int gfx_v9_0_sw_init(void *handle)
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

-	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
-	if (r)
-		return r;
-
	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
@@ -2403,7 +2513,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
			return r;
	}

-	if (adev->asic_type == CHIP_RAVEN) {
+	if (adev->asic_type == CHIP_RAVEN ||
+	    adev->asic_type == CHIP_VEGA20) {
		if (amdgpu_lbpw != 0)
			gfx_v9_0_enable_lbpw(adev, true);
		else
@@ -3091,7 +3202,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

-	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3310,7 +3421,7 @@ static int gfx_v9_0_hw_fini(void *handle)
	/* Use deinitialize sequence from CAIL when unbinding device from driver,
	 * otherwise KIQ is hanging when binding back
	 */
-	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->in_suspend) {
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
				adev->gfx.kiq.ring.pipe,
@@ -3330,20 +3441,12 @@ static int gfx_v9_0_hw_fini(void *handle)

static int gfx_v9_0_suspend(void *handle)
{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	adev->gfx.in_suspend = true;
-	return gfx_v9_0_hw_fini(adev);
+	return gfx_v9_0_hw_fini(handle);
}

static int gfx_v9_0_resume(void *handle)
{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
-
-	r = gfx_v9_0_hw_init(adev);
-	adev->gfx.in_suspend = false;
-	return r;
+	return gfx_v9_0_hw_init(handle);
}

static bool gfx_v9_0_is_idle(void *handle)
@@ -4609,68 +4712,6 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
	return 0;
}

-static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
-					    struct amdgpu_irq_src *src,
-					    unsigned int type,
-					    enum amdgpu_interrupt_state state)
-{
-	uint32_t tmp, target;
-	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
-	if (ring->me == 1)
-		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
-	else
-		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
-	target += ring->pipe;
-
-	switch (type) {
-	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
-		if (state == AMDGPU_IRQ_STATE_DISABLE) {
-			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
-			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
-					    GENERIC2_INT_ENABLE, 0);
-			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
-
-			tmp = RREG32(target);
-			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
-					    GENERIC2_INT_ENABLE, 0);
-			WREG32(target, tmp);
-		} else {
-			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
-			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
-					    GENERIC2_INT_ENABLE, 1);
-			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
-
-			tmp = RREG32(target);
-			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
-					    GENERIC2_INT_ENABLE, 1);
-			WREG32(target, tmp);
-		}
-		break;
-	default:
-		BUG(); /* kiq only support GENERIC2_INT now */
-		break;
-	}
-	return 0;
-}
-
-static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
-			    struct amdgpu_irq_src *source,
-			    struct amdgpu_iv_entry *entry)
-{
-	u8 me_id, pipe_id, queue_id;
-	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
-
-	me_id = (entry->ring_id & 0x0c) >> 2;
-	pipe_id = (entry->ring_id & 0x03) >> 0;
-	queue_id = (entry->ring_id & 0x70) >> 4;
-	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
-		  me_id, pipe_id, queue_id);
-
-	amdgpu_fence_process(ring);
-	return 0;
-}
-
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
@@ -4819,11 +4860,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

-static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
-	.set = gfx_v9_0_kiq_set_interrupt_state,
-	.process = gfx_v9_0_kiq_irq,
-};
-
static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
@@ -4849,9 +4885,6 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
-
-	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
-	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)

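The new gfx_v9_0_init_always_on_cu_mask() above walks each SE/SH's CU bitmap with a shifting one-bit mask and accumulates the first N active CUs into an always-on bitmap. A stripped-down, standalone sketch of just that bit walk (hypothetical helper name, not driver code):

#include <stdint.h>

/* Return a mask of the first 'wanted' set bits in 'cu_bitmap';
 * mirrors the mask <<= 1 walk in the hunk above. */
static uint32_t first_n_active_cus(uint32_t cu_bitmap, unsigned int wanted)
{
	uint32_t mask = 1, out = 0;
	unsigned int counter = 0;

	while (mask) {
		if (cu_bitmap & mask) {
			if (counter < wanted)
				out |= mask;
			else
				break;
			counter++;
		}
		mask <<= 1;
	}
	return out;
}
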
@@ -306,11 +306,8 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
			  enum psp_ring_type ring_type)
{
	int ret = 0;
-	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

-	ring = &psp->km_ring;
-
	/* Write the ring destroy command to C2PMSG_64 */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);

@@ -504,41 +504,6 @@ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
	return 0;
}

-/**
- * sdma_v2_4_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
-{
-	const struct sdma_firmware_header_v1_0 *hdr;
-	const __le32 *fw_data;
-	u32 fw_size;
-	int i, j;
-
-	/* halt the MEs */
-	sdma_v2_4_enable(adev, false);
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (!adev->sdma.instance[i].fw)
-			return -EINVAL;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
-		amdgpu_ucode_print_sdma_hdr(&hdr->header);
-		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-		fw_data = (const __le32 *)
-			(adev->sdma.instance[i].fw->data +
-			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-		for (j = 0; j < fw_size; j++)
-			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
-	}
-
-	return 0;
-}
-
/**
 * sdma_v2_4_start - setup and start the async dma engines
@@ -552,13 +517,6 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		r = sdma_v2_4_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
	/* halt the engine before programing */
	sdma_v2_4_enable(adev, false);

@@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

-		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
-			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma.instance[i].fw;
-			header = (const struct common_firmware_header *)info->fw->data;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-		}
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+		info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+		info->fw = adev->sdma.instance[i].fw;
+		header = (const struct common_firmware_header *)info->fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	}
out:
	if (err) {
@@ -777,42 +776,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
	return 0;
}

-/**
- * sdma_v3_0_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
-{
-	const struct sdma_firmware_header_v1_0 *hdr;
-	const __le32 *fw_data;
-	u32 fw_size;
-	int i, j;
-
-	/* halt the MEs */
-	sdma_v3_0_enable(adev, false);
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (!adev->sdma.instance[i].fw)
-			return -EINVAL;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
-		amdgpu_ucode_print_sdma_hdr(&hdr->header);
-		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-		fw_data = (const __le32 *)
-			(adev->sdma.instance[i].fw->data +
-			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-		for (j = 0; j < fw_size; j++)
-			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
-	}
-
-	return 0;
-}
-
/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
@@ -825,12 +788,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r;

-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		r = sdma_v3_0_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
	/* disable sdma engine before programing it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

@@ -818,7 +818,7 @@ sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
	uint32_t def, data;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
-		/* disable idle interrupt */
+		/* enable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;

@@ -1364,6 +1364,9 @@ static int sdma_v4_0_hw_init(void *handle)
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
	sdma_v4_0_init_golden_registers(adev);

	r = sdma_v4_0_start(adev);
@@ -1381,6 +1384,9 @@ static int sdma_v4_0_hw_fini(void *handle)
	sdma_v4_0_ctx_switch_enable(adev, false);
	sdma_v4_0_enable(adev, false);

+	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+
	return 0;
}

@@ -2057,13 +2057,13 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
		/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
		break;
@@ -2071,13 +2071,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+
		/* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
		break;
@@ -2085,11 +2086,11 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
		break;
	default:
		BUG();

@@ -502,12 +502,14 @@ static int si_dma_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
+			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
-	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
+			      &adev->sdma.trap_irq);
	if (r)
		return r;

@@ -649,17 +651,10 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
-	amdgpu_fence_process(&adev->sdma.instance[0].ring);
-
-	return 0;
-}
-
-static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
-				      struct amdgpu_irq_src *source,
-				      struct amdgpu_iv_entry *entry)
-{
-	amdgpu_fence_process(&adev->sdma.instance[1].ring);
-
+	if (entry->src_id == 224)
+		amdgpu_fence_process(&adev->sdma.instance[0].ring);
+	else
+		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

@@ -786,11 +781,6 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.process = si_dma_process_trap_irq,
};

-static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
-	.set = si_dma_set_trap_irq_state,
-	.process = si_dma_process_trap_irq_1,
-};
-
static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
	.process = si_dma_process_illegal_inst_irq,
};
@@ -799,7 +789,6 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
-	adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
	adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}

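The si_dma change above folds two near-identical trap handlers into one IRQ source and dispatches on the IV entry's src_id (224 for DMA0, 244 for DMA1). The same fan-in pattern, reduced to its core (instance selection shown with a hypothetical handler name):

/* sketch: one handler for several engine instances, selected by the
 * interrupt vector's source id rather than by separate irq_src structs */
static int dma_trap_irq(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	int inst = (entry->src_id == 224) ? 0 : 1;

	amdgpu_fence_process(&adev->sdma.instance[inst].ring);
	return 0;
}
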
@@ -529,6 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -539,8 +541,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
-		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
@@ -551,6 +551,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -560,8 +562,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
-		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:

@@ -274,7 +274,7 @@ err:
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
-					bool direct, struct dma_fence **fence)
+					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
@@ -310,11 +310,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

-	if (direct)
-		r = amdgpu_job_submit_direct(job, ring, &f);
-	else
-		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

@@ -345,7 +341,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
		goto error;
	}

-	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;

@@ -280,8 +280,8 @@ err:
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
-int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-				 bool direct, struct dma_fence **fence)
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
@@ -317,11 +317,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

-	if (direct)
-		r = amdgpu_job_submit_direct(job, ring, &f);
-	else
-		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

@@ -352,7 +348,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
		goto error;
	}

-	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
+	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
		goto error;

@@ -278,6 +278,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

+	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
@@ -297,20 +298,21 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

+	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

+	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
-		     AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
+	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
@@ -325,6 +327,7 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

+	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
@@ -347,24 +350,25 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

+	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
			     0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE,
+	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
			     0xFFFFFFFF, 0);

+	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
+		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE),
+		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
-		     AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40),
+	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
			     0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
@@ -601,8 +605,6 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
-
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
@@ -812,12 +814,12 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
-			if (status & 2)
+			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
-		if (status & 2)
+		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
@@ -875,6 +877,8 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

+	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
+
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
@@ -898,12 +902,13 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)

	ring = &adev->vcn.ring_jpeg;
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
+		     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
-	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

	/* initialize wptr */
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
@@ -1051,6 +1056,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

+	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
+
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
@@ -1120,8 +1127,9 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	int ret_code;

-	/* Wait for power status to be 1 */
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
+	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
@@ -1147,7 +1155,7 @@ static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
+	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

static int vcn_v1_0_wait_for_idle(void *handle)
@@ -1155,7 +1163,8 @@ static int vcn_v1_0_wait_for_idle(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);
+	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
+			   UVD_STATUS__IDLE, ret);

	return ret;
}
@@ -1217,6 +1226,10 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
+			lower_32_bits(ring->wptr) | 0x80000000);
+
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

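Several vcn_v1_0 hunks above swap magic numbers (0x2, 0x00000001L | 0x00000002L) for the named register masks they encode, such as UVD_STATUS__IDLE and the UVD_JRBC_RB_CNTL field masks. The readability win, in isolation (both fragments taken from the diff itself):

/* before: opaque constant */
if (status & 2)
	break;

/* after: the same bit, now self-documenting */
if (status & UVD_STATUS__IDLE)
	break;
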
@@ -1596,16 +1596,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1615,8 +1617,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
@@ -1626,6 +1626,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1635,8 +1637,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
@@ -1649,6 +1649,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1658,8 +1660,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
@@ -1667,6 +1667,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1676,8 +1678,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -1688,6 +1688,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -1697,8 +1699,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)

@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
-	int retval;
	struct mqd_manager *mqd_mgr;
+	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
	if (!q->properties.is_active)
		return 0;

-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-			&q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
	if (retval)
		goto out_uninit_mqd;

@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				&q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}

out_unlock:
	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
+	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
		kfd_flush_tlb(pdd);
	}

+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				q->queue, &q->properties,
-				q->process->mm);
+				q->queue, &q->properties, mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
+	if (mm)
+		mmput(mm);
	dqm_unlock(dqm);
	return retval;
}
@@ -1343,9 +1364,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
{
	int retval;
	struct mqd_manager *mqd_mgr;
-	bool preempt_all_queues;
-
-	preempt_all_queues = false;

	retval = 0;

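The KFD hunks above stop trusting a cached q->process->mm: load_mqd now runs either against current->mm (with a WARN if called from the wrong thread) or against an mm_struct pinned with get_task_mm(), whose reference must be dropped with mmput(). The pin/unpin pattern on its own, with an assumed task pointer and a hypothetical wrapper name:

#include <linux/sched.h>
#include <linux/sched/mm.h>

/* sketch: safely use another task's mm; get_task_mm() returns NULL if
 * the task has no address space left, and the reference it takes must
 * be released with mmput(). */
static int with_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return -EFAULT;
	/* ... operate on mm here ... */
	mmput(mm);
	return 0;
}
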
@@ -4740,12 +4740,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
-
	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
	drm_atomic_helper_cleanup_planes(dev, state);

	/*

@@ -36,17 +36,13 @@
 * Private declarations.
 *****************************************************************************/

-struct handler_common_data {
+struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	/* DM which this handler belongs to */
	struct amdgpu_display_manager *dm;
-};
-
-struct amdgpu_dm_irq_handler_data {
-	struct handler_common_data hcd;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};
@@ -61,7 +57,7 @@ struct amdgpu_dm_irq_handler_data {
 * Private functions.
 *****************************************************************************/

-static void init_handler_common_data(struct handler_common_data *hcd,
+static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
@@ -85,11 +81,9 @@ static void dm_irq_work_func(struct work_struct *work)
	struct amdgpu_dm_irq_handler_data *handler_data;

	list_for_each(entry, handler_list) {
-		handler_data =
-			list_entry(
-				entry,
-				struct amdgpu_dm_irq_handler_data,
-				hcd.list);
+		handler_data = list_entry(entry,
+					  struct amdgpu_dm_irq_handler_data,
+					  list);

		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
				handler_data->irq_source);
@@ -97,7 +91,7 @@ static void dm_irq_work_func(struct work_struct *work)
		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
				handler_data->irq_source);

-		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+		handler_data->handler(handler_data->handler_arg);
	}

	/* Call a DAL subcomponent which registered for interrupt notification
@@ -137,11 +131,11 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
-				hcd.list);
+				list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
-			list_del(&handler->hcd.list);
+			list_del(&handler->list);
			handler_removed = true;
			break;
		}
@@ -230,8 +224,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,

	memset(handler_data, 0, sizeof(*handler_data));

-	init_handler_common_data(&handler_data->hcd, ih, handler_args,
-			&adev->dm);
+	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

@@ -250,7 +243,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
		break;
	}

-	list_add_tail(&handler_data->hcd.list, hnd_list);
+	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

@@ -462,15 +455,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
			entry,
			&adev->dm.irq_handler_list_high_tab[irq_source]) {

-		handler_data =
-			list_entry(
-				entry,
-				struct amdgpu_dm_irq_handler_data,
-				hcd.list);
+		handler_data = list_entry(entry,
+					  struct amdgpu_dm_irq_handler_data,
+					  list);

		/* Call a subcomponent which registered for immediate
		 * interrupt notification */
-		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

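The amdgpu_dm_irq change above deletes the handler_common_data indirection and folds its fields into amdgpu_dm_irq_handler_data, so every lookup resolves through the member `list` instead of `hcd.list`. The mechanics that make this a pure rename: list_entry() is container_of(), recovering the enclosing struct from a pointer to its embedded list_head, so flattening the struct only changes the member argument:

/* list_entry(ptr, type, member) == container_of(ptr, type, member);
 * the flattened struct just moves the list_head one level up. */
struct amdgpu_dm_irq_handler_data *hd =
	list_entry(entry, struct amdgpu_dm_irq_handler_data, list);
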
@@ -60,6 +60,7 @@
#define DC_LOGGER \
	dc->ctx->logger

+const static char DC_BUILD_ID[] = "production-build";

/*******************************************************************************
 * Private functions
@@ -758,6 +759,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)

	dc->config = init_params->flags;

+	dc->build_id = DC_BUILD_ID;
+
	DC_LOG_DC("Display Core initialized\n");


@@ -1110,32 +1113,6 @@ static bool is_surface_in_context(
	return false;
}

-static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
-{
-	switch (format) {
-	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
-	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
-		return 12;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
-	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
-	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
-	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
-		return 16;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
-		return 32;
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
-	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
-		return 64;
-	default:
-		ASSERT_CRITICAL(false);
-		return -1;
-	}
-}
-
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
@@ -1169,8 +1146,8 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
		|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

-	if (pixel_format_to_bpp(u->plane_info->format) !=
-	    pixel_format_to_bpp(u->surface->format))
+	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
+	    resource_pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
@@ -1859,3 +1836,16 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
		}
	}
}
+
+void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
+{
+	info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
+	info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
+	info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
+	info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
+	info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
+	info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
+	info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
+	info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
+	info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+}

@ -1975,6 +1975,9 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream)
|
|||
else
|
||||
stream->phy_pix_clk =
|
||||
stream->timing.pix_clk_khz;
|
||||
|
||||
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
|
||||
stream->phy_pix_clk *= 2;
|
||||
}
|
||||
|
||||
enum dc_status resource_map_pool_resources(
|
||||
|
@ -2096,6 +2099,14 @@ enum dc_status dc_validate_global_state(
|
|||
if (pipe_ctx->stream != stream)
|
||||
continue;
|
||||
|
||||
if (dc->res_pool->funcs->get_default_swizzle_mode &&
|
||||
pipe_ctx->plane_state &&
|
||||
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
|
||||
result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
|
||||
if (result != DC_OK)
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Switch to dp clock source only if there is
|
||||
* no non dp stream that shares the same timing
|
||||
* with the dp stream.
|
||||
|
@ -2885,3 +2896,32 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla

	return res;
}

unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
		return 8;
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}
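
resource_pixel_format_to_bpp() reports bits per element rather than a true byte count, which is why the bandwidth check earlier compares it across the old and new plane formats. A small usage sketch, assuming a linear surface (surface_width is a hypothetical variable):

	unsigned int bpp = resource_pixel_format_to_bpp(
			SURFACE_PIXEL_FORMAT_GRPH_ARGB8888);	/* 32 */
	unsigned int pitch_bytes = surface_width * bpp / 8;
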
@ -38,13 +38,12 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.1.67"
#define DC_VER "3.1.68"

#define MAX_SURFACES 3
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4


/*******************************************************************************
 * Display Core Interfaces
 ******************************************************************************/
@ -208,6 +207,7 @@ struct dc_clocks {
	int dcfclk_deep_sleep_khz;
	int fclk_khz;
	int phyclk_khz;
	int dramclk_khz;
};

struct dc_debug_options {
@ -315,6 +315,8 @@ struct dc {
	struct compressor *fbc_compressor;

	struct dc_debug_data debug_data;

	const char *build_id;
};

enum frame_buffer_mode {
@ -599,6 +601,8 @@ struct dc_validation_set {

enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);

enum dc_status dc_validate_global_state(
		struct dc *dc,
		struct dc_state *new_ctx);

@ -289,7 +289,8 @@ enum swizzle_mode_values {
	DC_SW_VAR_S_X = 29,
	DC_SW_VAR_D_X = 30,
	DC_SW_VAR_R_X = 31,
	DC_SW_MAX
	DC_SW_MAX = 32,
	DC_SW_UNKNOWN = DC_SW_MAX
};

union dc_tiling_info {

@ -659,4 +659,16 @@ enum i2c_mot_mode {
	I2C_MOT_FALSE
};

struct AsicStateEx {
	unsigned int memoryClock;
	unsigned int displayClock;
	unsigned int engineClock;
	unsigned int maxSupportedDppClock;
	unsigned int dppClock;
	unsigned int socClock;
	unsigned int dcfClockDeepSleep;
	unsigned int fClock;
	unsigned int phyClock;
};

#endif /* DC_TYPES_H_ */

@ -312,7 +312,7 @@ static void process_channel_reply(

	/* in case HPD is LOW, exit AUX transaction */
	if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
		reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
		reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
		return;
	}

@ -664,6 +664,11 @@ static void dce_update_clocks(struct dccg *dccg,
		bool safe_to_lower)
{
	struct dm_pp_power_level_change_request level_change_req;
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);

	/* TODO: Investigate why this is needed to fix display corruption. */
	if (!clk_dce->dfs_bypass_active)
		new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
	/* get max clock state from PPLIB */
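
The 15% bump above compensates for DFS bypass being unavailable; the arithmetic is integer and rounds down. A worked example with an assumed request of 300,000 kHz:

	int dispclk_khz = 300000;

	dispclk_khz = dispclk_khz * 115 / 100;	/* 345000 kHz */
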
@ -662,21 +662,10 @@ bool dce110_link_encoder_validate_dp_output(
	const struct dce110_link_encoder *enc110,
	const struct dc_crtc_timing *crtc_timing)
{
	/* default RGB only */
	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
		return true;
	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
		return false;

	if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
		return true;

	/* for DCE 8.x or later DP Y-only feature,
	 * we need ASIC cap + FeatureSupportDPYonly, not support 666 */
	if (crtc_timing->flags.Y_ONLY &&
		enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
		crtc_timing->display_color_depth != COLOR_DEPTH_666)
		return true;

	return false;
	return true;
}

void dce110_link_encoder_construct(

@ -551,8 +551,7 @@ static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 300000,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
		.flags.bits.IS_TPS3_CAPABLE = true
};

struct link_encoder *dce100_link_encoder_create(
@ -690,7 +689,9 @@ static void destruct(struct dce110_resource_pool *pool)
			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {

@ -570,8 +570,7 @@ static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 594000,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
		.flags.bits.IS_TPS3_CAPABLE = true
};

static struct link_encoder *dce110_link_encoder_create(
@ -720,7 +719,9 @@ static void destruct(struct dce110_resource_pool *pool)
			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {

@ -555,8 +555,7 @@ static const struct encoder_feature_support link_enc_feature = {
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
		.flags.bits.IS_TPS4_CAPABLE = true
};

struct link_encoder *dce112_link_encoder_create(
@ -694,9 +693,6 @@ static void destruct(struct dce110_resource_pool *pool)
		if (pool->base.opps[i] != NULL)
			dce110_opp_destroy(&pool->base.opps[i]);

		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);

		if (pool->base.transforms[i] != NULL)
			dce112_transform_destroy(&pool->base.transforms[i]);

@ -712,6 +708,11 @@ static void destruct(struct dce110_resource_pool *pool)
			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;

@ -533,7 +533,9 @@ static void destruct(struct dce110_resource_pool *pool)
			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
@ -609,7 +611,6 @@ static const struct encoder_feature_support link_enc_feature = {
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
};

static struct link_encoder *dce120_link_encoder_create(

@ -650,8 +650,7 @@ static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 297000,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
		.flags.bits.IS_TPS3_CAPABLE = true
};

struct link_encoder *dce80_link_encoder_create(
@ -739,7 +738,9 @@ static void destruct(struct dce110_resource_pool *pool)
			kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {

@ -87,6 +87,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
{
	REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);
}

bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
{
	uint32_t enable = 0;

	REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);

	return enable ? true : false;
}


bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
@ -116,7 +133,43 @@ bool hubbub1_verify_allow_pstate_change_high(
		forced_pstate_allow = false;
	}

	/* RV1:
	/* RV2:
	 * dchubbubdebugind, at: 0xB
	 * description
	 * 0: Pipe0 Plane0 Allow Pstate Change
	 * 1: Pipe0 Plane1 Allow Pstate Change
	 * 2: Pipe0 Cursor0 Allow Pstate Change
	 * 3: Pipe0 Cursor1 Allow Pstate Change
	 * 4: Pipe1 Plane0 Allow Pstate Change
	 * 5: Pipe1 Plane1 Allow Pstate Change
	 * 6: Pipe1 Cursor0 Allow Pstate Change
	 * 7: Pipe1 Cursor1 Allow Pstate Change
	 * 8: Pipe2 Plane0 Allow Pstate Change
	 * 9: Pipe2 Plane1 Allow Pstate Change
	 * 10: Pipe2 Cursor0 Allow Pstate Change
	 * 11: Pipe2 Cursor1 Allow Pstate Change
	 * 12: Pipe3 Plane0 Allow Pstate Change
	 * 13: Pipe3 Plane1 Allow Pstate Change
	 * 14: Pipe3 Cursor0 Allow Pstate Change
	 * 15: Pipe3 Cursor1 Allow Pstate Change
	 * 16: Pipe4 Plane0 Allow Pstate Change
	 * 17: Pipe4 Plane1 Allow Pstate Change
	 * 18: Pipe4 Cursor0 Allow Pstate Change
	 * 19: Pipe4 Cursor1 Allow Pstate Change
	 * 20: Pipe5 Plane0 Allow Pstate Change
	 * 21: Pipe5 Plane1 Allow Pstate Change
	 * 22: Pipe5 Cursor0 Allow Pstate Change
	 * 23: Pipe5 Cursor1 Allow Pstate Change
	 * 24: Pipe6 Plane0 Allow Pstate Change
	 * 25: Pipe6 Plane1 Allow Pstate Change
	 * 26: Pipe6 Cursor0 Allow Pstate Change
	 * 27: Pipe6 Cursor1 Allow Pstate Change
	 * 28: WB0 Allow Pstate Change
	 * 29: WB1 Allow Pstate Change
	 * 30: Arbiter's allow_pstate_change
	 * 31: SOC pstate change request"
	 *
	 * RV1:
	 * dchubbubdebugind, at: 0x7
	 * description "3-0: Pipe0 cursor0 QOS
	 * 7-4: Pipe1 cursor0 QOS
@ -140,7 +193,6 @@ bool hubbub1_verify_allow_pstate_change_high(
	 * 31: SOC pstate change request
	 */


	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
@ -802,5 +854,9 @@ void hubbub1_construct(struct hubbub *hubbub,
	hubbub->masks = hubbub_mask;

	hubbub->debug_test_index_pstate = 0x7;
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
	if (ctx->dce_version == DCN_VERSION_1_01)
		hubbub->debug_test_index_pstate = 0xB;
#endif
}

@ -203,6 +203,10 @@ void hubbub1_program_watermarks(
		unsigned int refclk_mhz,
		bool safe_to_lower);

void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub);

bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub);

void hubbub1_toggle_watermark_change_req(
		struct hubbub *hubbub);

@ -997,7 +997,21 @@ static void dcn10_init_hw(struct dc *dc)
	} else {

		if (!dcb->funcs->is_accelerated_mode(dcb)) {
			bool allow_self_fresh_force_enable =
					hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub);

			bios_golden_init(dc);

			/* WA for making DF sleep when idle after resume from S0i3.
			 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
			 * the command table; if it was 0 before calling the command
			 * table and changed to 1 after, it should be set back to 0.
			 */
			if (allow_self_fresh_force_enable == false &&
					hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
				hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub);

			disable_vga(dc->hwseq);
		}

@ -606,22 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
	const struct dcn10_link_encoder *enc10,
	const struct dc_crtc_timing *crtc_timing)
{
	/* default RGB only */
	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
		return true;
	if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
		return false;

	if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE)
		return true;

	/* for DCE 8.x or later DP Y-only feature,
	 * we need ASIC cap + FeatureSupportDPYonly, not support 666
	 */
	if (crtc_timing->flags.Y_ONLY &&
		enc10->base.features.flags.bits.IS_YCBCR_CAPABLE &&
		crtc_timing->display_color_depth != COLOR_DEPTH_666)
		return true;

	return false;
	return true;
}

void dcn10_link_encoder_construct(

@ -98,7 +98,6 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;
	int vertical_line_start;

	patched_crtc_timing = *dc_crtc_timing;
@ -112,16 +111,13 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c
			vesa_sync_start -
			patched_crtc_timing.h_border_left;

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;
			patched_crtc_timing.v_border_top);

	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
	if (vertical_line_start < 0) {
@ -154,7 +150,7 @@ void optc1_program_vline_interrupt(
		req_delta_lines--;

	if (req_delta_lines > vsync_line)
		start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
		start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2;
	else
		start_line = vsync_line - req_delta_lines;

@ -186,7 +182,6 @@ void optc1_program_timing(
	uint32_t v_sync_end;
	uint32_t v_init, v_fp2;
	uint32_t h_sync_polarity, v_sync_polarity;
	uint32_t interlace_factor;
	uint32_t start_point = 0;
	uint32_t field_num = 0;
	uint32_t h_div_2;
|
|||
REG_UPDATE(OTG_H_SYNC_A_CNTL,
|
||||
OTG_H_SYNC_A_POL, h_sync_polarity);
|
||||
|
||||
/* Load vertical timing */
|
||||
v_total = patched_crtc_timing.v_total - 1;
|
||||
|
||||
/* CRTC_V_TOTAL = v_total - 1 */
|
||||
if (patched_crtc_timing.flags.INTERLACE) {
|
||||
interlace_factor = 2;
|
||||
v_total = 2 * patched_crtc_timing.v_total;
|
||||
} else {
|
||||
interlace_factor = 1;
|
||||
v_total = patched_crtc_timing.v_total - 1;
|
||||
}
|
||||
REG_SET(OTG_V_TOTAL, 0,
|
||||
OTG_V_TOTAL, v_total);
|
||||
|
||||
|
@ -259,7 +246,7 @@ void optc1_program_timing(
			OTG_V_TOTAL_MIN, v_total);

	/* v_sync_start = 0, v_sync_end = v_sync_width */
	v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
	v_sync_end = patched_crtc_timing.v_sync_width;

	REG_UPDATE_2(OTG_V_SYNC_A,
			OTG_V_SYNC_A_START, 0,
@ -271,15 +258,13 @@ void optc1_program_timing(

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;
			patched_crtc_timing.v_border_top);

	/* v_blank_start = v_blank_end + v_active */
	asic_blank_start = asic_blank_end +
			(patched_crtc_timing.v_border_top +
			patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom)
			* interlace_factor;
			patched_crtc_timing.v_border_bottom);

	REG_UPDATE_2(OTG_V_BLANK_START_END,
			OTG_V_BLANK_START, asic_blank_start,
@ -301,7 +286,7 @@ void optc1_program_timing(
			0 : 1;

	REG_UPDATE(OTG_V_SYNC_A_CNTL,
			OTG_V_SYNC_A_POL, v_sync_polarity);

	v_init = asic_blank_start;
	if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
@ -532,7 +517,6 @@ bool optc1_validate_timing(
	struct timing_generator *optc,
	const struct dc_crtc_timing *timing)
{
	uint32_t interlace_factor;
	uint32_t v_blank;
	uint32_t h_blank;
	uint32_t min_v_blank;
@ -540,10 +524,8 @@ bool optc1_validate_timing(

	ASSERT(timing != NULL);

	interlace_factor = timing->flags.INTERLACE ? 2 : 1;
	v_blank = (timing->v_total - timing->v_addressable -
			timing->v_border_top - timing->v_border_bottom) *
			interlace_factor;
			timing->v_border_top - timing->v_border_bottom);

	h_blank = (timing->h_total - timing->h_addressable -
			timing->h_border_right -

@ -507,6 +507,18 @@ static const struct resource_caps res_cap = {
		.num_ddc = 4,
};

#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
static const struct resource_caps rv2_res_cap = {
		.num_timing_generator = 3,
		.num_opp = 3,
		.num_video_plane = 3,
		.num_audio = 3,
		.num_stream_encoder = 3,
		.num_pll = 3,
		.num_ddc = 3,
};
#endif

static const struct dc_debug_options debug_defaults_drv = {
		.sanity_checks = true,
		.disable_dmcu = true,
@ -711,8 +723,7 @@ static const struct encoder_feature_support link_enc_feature = {
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
		.flags.bits.IS_TPS4_CAPABLE = true
};

struct link_encoder *dcn10_link_encoder_create(
@ -897,7 +908,9 @@ static void destruct(struct dcn10_resource_pool *pool)
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
@ -1119,6 +1132,24 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
	return DC_OK;
}

static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
{
	enum dc_status result = DC_OK;

	enum surface_pixel_format surf_pix_format = plane_state->format;
	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);

	enum swizzle_mode_values swizzle = DC_SW_LINEAR;

	if (bpp == 64)
		swizzle = DC_SW_64KB_D;
	else
		swizzle = DC_SW_64KB_S;

	plane_state->tiling_info.gfx9.swizzle = swizzle;
	return result;
}

static const struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
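
With this default chooser, only 64-bit-per-element formats (the 16161616/FP16 family) receive the 'D' 64KB swizzle; everything else gets the standard 'S' variant. A hedged sketch of the effect (the stack-allocated plane state is illustrative only):

	struct dc_plane_state ps = { 0 };

	ps.format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	ps.tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
	if (dcn10_get_default_swizzle_mode(&ps) == DC_OK)
		ASSERT(ps.tiling_info.gfx9.swizzle == DC_SW_64KB_S);
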
@ -1129,7 +1160,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
	.validate_bandwidth = dcn_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
	.validate_plane = dcn10_validate_plane,
	.add_stream_to_ctx = dcn10_add_stream_to_ctx
	.add_stream_to_ctx = dcn10_add_stream_to_ctx,
	.get_default_swizzle_mode = dcn10_get_default_swizzle_mode
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
@ -1152,7 +1184,12 @@ static bool construct(

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap;
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
	if (ctx->dce_version == DCN_VERSION_1_01)
		pool->base.res_cap = &rv2_res_cap;
	else
#endif
		pool->base.res_cap = &res_cap;
	pool->base.funcs = &dcn10_res_pool_funcs;

	/*
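
Note the preprocessor/else interplay here: with CONFIG_DRM_AMD_DC_DCN1_01 disabled only the plain default assignment survives, while with it enabled the same assignment becomes the else branch. Roughly, the enabled build reduces to:

	if (ctx->dce_version == DCN_VERSION_1_01)
		pool->base.res_cap = &rv2_res_cap;
	else
		pool->base.res_cap = &res_cap;
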
@ -346,7 +346,7 @@ static void process_channel_reply(

	/* in case HPD is LOW, exit AUX transaction */
	if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
		reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
		reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
		return;
	}

@ -120,6 +120,9 @@ struct resource_funcs {
			struct dc *dc,
			struct dc_state *new_ctx,
			struct dc_stream_state *stream);
	enum dc_status (*get_default_swizzle_mode)(
			struct dc_plane_state *plane_state);

};

struct audio_support{

@ -58,7 +58,6 @@ struct encoder_feature_support {
		uint32_t IS_HBR3_CAPABLE:1;
		uint32_t IS_TPS3_CAPABLE:1;
		uint32_t IS_TPS4_CAPABLE:1;
		uint32_t IS_YCBCR_CAPABLE:1;
		uint32_t HDMI_6GB_EN:1;
	} bits;
	uint32_t raw;

@ -172,4 +172,7 @@ void update_audio_usage(
		const struct resource_pool *pool,
		struct audio *audio,
		bool acquired);

unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);

#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */

@ -102,4 +102,9 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal)
		dc_is_hdmi_signal(signal));
}

static inline bool dc_is_virtual_signal(enum signal_type signal)
{
	return (signal == SIGNAL_TYPE_VIRTUAL);
}

#endif

@ -2449,6 +2449,8 @@
#define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX    0
#define mmGB_EDC_MODE                              0x107e
#define mmGB_EDC_MODE_BASE_IDX                     0
#define mmCP_DEBUG                                 0x107f
#define mmCP_DEBUG_BASE_IDX                        0
#define mmCP_CPF_DEBUG                             0x1080
#define mmCP_PQ_WPTR_POLL_CNTL                     0x1083
#define mmCP_PQ_WPTR_POLL_CNTL_BASE_IDX            0

@ -175,4 +175,7 @@
#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX       0
#define mmSMUSVI0_PLANE0_CURRENTVID                0x0013

#define mmSMUSVI0_TEL_PLANE0_BASE_IDX              0
#define mmSMUSVI0_TEL_PLANE0                       0x0004

#endif

@ -258,4 +258,7 @@
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT    0x18
#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK      0xFF000000L

#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT                0x10
#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK                  0x01FF0000L

#endif

@ -26,6 +26,18 @@
#define mmCG_MULT_THERMAL_STATUS                   0x005f
#define mmCG_MULT_THERMAL_STATUS_BASE_IDX          0

#define mmCG_FDO_CTRL0                             0x0067
#define mmCG_FDO_CTRL0_BASE_IDX                    0

#define mmCG_FDO_CTRL1                             0x0068
#define mmCG_FDO_CTRL1_BASE_IDX                    0

#define mmCG_FDO_CTRL2                             0x0069
#define mmCG_FDO_CTRL2_BASE_IDX                    0

#define mmCG_TACH_CTRL                             0x006a
#define mmCG_TACH_CTRL_BASE_IDX                    0

#define mmTHM_THERMAL_INT_ENA                      0x000a
#define mmTHM_THERMAL_INT_ENA_BASE_IDX             0
#define mmTHM_THERMAL_INT_CTRL                     0x000b

@ -28,6 +28,16 @@
#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT        0x9
#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK     0x000001FFL
#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK          0x0003FE00L
#define CG_FDO_CTRL2__TMIN__SHIFT                      0x0
#define CG_FDO_CTRL2__TMIN_MASK                        0x000000FFL
#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT              0xb
#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK                0x00003800L
#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT              0x0
#define CG_FDO_CTRL1__FMAX_DUTY100_MASK                0x000000FFL
#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT           0x0
#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK             0x000000FFL
#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT             0x3
#define CG_TACH_CTRL__TARGET_PERIOD_MASK               0xFFFFFFF8L

//THM_THERMAL_INT_ENA
#define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT     0x0
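
These __SHIFT/_MASK pairs follow the usual soc15 register-header convention for field access. A minimal sketch of the idiom (reg and new_period are illustrative variables):

	/* Extract, then update, TARGET_PERIOD in a CG_TACH_CTRL value. */
	uint32_t period = (reg & CG_TACH_CTRL__TARGET_PERIOD_MASK) >>
			  CG_TACH_CTRL__TARGET_PERIOD__SHIFT;

	reg &= ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	reg |= (new_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT) &
	       CG_TACH_CTRL__TARGET_PERIOD_MASK;
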
@ -114,6 +114,8 @@ enum amd_pp_sensors {
	AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
	AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
	AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
	AMDGPU_PP_SENSOR_MIN_FAN_RPM,
	AMDGPU_PP_SENSOR_MAX_FAN_RPM,
};

enum amd_pp_task {
@ -228,6 +230,7 @@ struct amd_pm_funcs {
	enum amd_dpm_forced_level (*get_performance_level)(void *handle);
	enum amd_pm_state_type (*get_current_power_state)(void *handle);
	int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
	int (*set_fan_speed_rpm)(void *handle, uint32_t rpm);
	int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
	int (*get_pp_table)(void *handle, char **table);
	int (*set_pp_table)(void *handle, const char *buf, size_t size);
@ -272,6 +275,7 @@ struct amd_pm_funcs {
	int (*get_display_mode_validation_clocks)(void *handle,
			struct amd_pp_simple_clock_info *clocks);
	int (*notify_smu_enable_pwe)(void *handle);
	int (*enable_mgpu_fan_boost)(void *handle);
};

#endif

@ -109,11 +109,8 @@ static int pp_sw_fini(void *handle)

	hwmgr_sw_fini(hwmgr);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		amdgpu_ucode_fini_bo(adev);
	}
	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}
@ -124,9 +121,6 @@ static int pp_hw_init(void *handle)
	struct amdgpu_device *adev = handle;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
		amdgpu_ucode_init_bo(adev);

	ret = hwmgr_hw_init(hwmgr);

	if (ret)
@ -273,8 +267,23 @@ const struct amdgpu_ip_block_version pp_smu_ip_block =
	.funcs = &pp_ip_funcs,
};

/* This interface is only supported on VI, because only smu7/8 can help
 * to load the gfx/sdma fw. The SMU needs to be enabled before loading
 * the other IPs' fw, so call start_smu to load the smu7 fw and then
 * the other IPs' fw.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

@ -576,6 +585,24 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
	return ret;
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}
	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}

static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
@ -813,6 +840,12 @@ static int pp_dpm_read_sensor(void *handle, int idx,
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
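
The two new fan-RPM cases are served straight from the cached thermal-controller caps, so they return without taking smu_lock. Illustrative read (callers normally reach this through the amd_pm_funcs table rather than directly):

	uint32_t min_rpm = 0;
	int size = sizeof(min_rpm);

	pp_dpm_read_sensor(handle, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
			   &min_rpm, &size);
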
@ -1196,6 +1229,21 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

static int pp_set_powergating_by_smu(void *handle,
		uint32_t block_type, bool gate)
{
@ -1218,6 +1266,9 @@ static int pp_set_powergating_by_smu(void *handle,
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
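
Illustrative use of the new SDMA case, e.g. from an IP block that wants its engine gated (the call site is hypothetical):

	pp_set_powergating_by_smu(handle, AMD_IP_BLOCK_TYPE_SDMA, true);
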
@ -1243,6 +1294,24 @@ static int pp_notify_smu_enable_pwe(void *handle)
	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) {
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}

static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
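
pp_enable_mgpu_fan_boost() follows the same defensive wrapper shape as the other pp_dpm_* entry points: validate the handle, treat a missing hwmgr_func hook as optional, and serialize the backend call under smu_lock. Reduced to a sketch (some_hook is a placeholder, not a real member):

	static int pp_generic_call(void *handle)
	{
		struct pp_hwmgr *hwmgr = handle;
		int ret;

		if (!hwmgr || !hwmgr->pm_en)
			return -EINVAL;
		if (hwmgr->hwmgr_func->some_hook == NULL)
			return 0;	/* optional on this ASIC */

		mutex_lock(&hwmgr->smu_lock);
		ret = hwmgr->hwmgr_func->some_hook(hwmgr);
		mutex_unlock(&hwmgr->smu_lock);
		return ret;
	}
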
@ -1255,6 +1324,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
@ -1287,4 +1357,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
};

@ -89,7 +89,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr->reload_fw = 1;
	hwmgr_init_workload_prority(hwmgr);

	switch (hwmgr->chip_family) {
@ -209,17 +208,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->smumgr_funcs)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu) {
		ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
		if (ret) {
			pr_err("smc start failed\n");
			return -EINVAL;
		}
	}

	if (!hwmgr->pm_en)
		return 0;

@ -320,13 +308,6 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
	if (!hwmgr)
		return -EINVAL;

	if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
			pr_err("smc start failed\n");
			return -EINVAL;
		}
	}

	if (!hwmgr->pm_en)
		return 0;

@ -1153,6 +1153,14 @@ static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}

static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
	else
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
}

static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	if (bgate) {
@ -1208,6 +1216,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
	.smus_notify_pwe = smu10_smus_notify_pwe,
	.display_clock_voltage_request = smu10_display_clock_voltage_request,
	.powergate_gfx = smu10_gfx_off_control,
	.powergate_sdma = smu10_powergate_sdma,
};

int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)

@ -260,6 +260,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
	    (hwmgr->thermal_controller.fanInfo.
		ucTachometerPulsesPerRevolution == 0) ||
	    speed == 0 ||
	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
	    (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
		return 0;
@ -272,7 +273,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);

	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_TACH_STATUS, TACH_PERIOD, tach_period);
			CG_TACH_CTRL, TARGET_PERIOD, tach_period);

	return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
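
The tach-period formula converts a target RPM into a counter period derived from the reference clock. A worked example with hypothetical values, assuming amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units; note the intermediate product must stay within 32 bits:

	uint32_t crystal_clock_freq = 2500;	/* 25 MHz, hypothetical */
	uint32_t speed = 3000;			/* target RPM */
	uint32_t tach_period;

	/* 60 * 2500 * 10000 / (8 * 3000) = 62500 */
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
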
@ -1228,14 +1228,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,

static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
	}
	return 0;
}

static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
		return smum_send_msg_to_smc_with_parameter(
			hwmgr,
			PPSMC_MSG_UVDPowerON,

@ -39,6 +39,50 @@ uint16_t convert_to_vddc(uint8_t vid)
	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
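
The new phm_copy_* helpers fold in le32_to_cpu() because ATOM powerplay tables are stored little-endian in the VBIOS image; a raw copy is only correct on little-endian hosts. A minimal sketch of the difference, using the kernel's endian accessors:

	__le32 raw = cpu_to_le32(0x12345678);	/* as stored in the table */

	/* Byte-reversed on big-endian machines: */
	uint32_t wrong = *(uint32_t *)&raw;
	/* Correct everywhere: */
	uint32_t right = le32_to_cpu(raw);
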
@ -47,6 +47,18 @@ struct watermarks {
	uint32_t padding[7];
};

int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count);

int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count);

extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
				uint32_t index,
				uint32_t value, uint32_t mask);

@ -451,23 +451,23 @@ static int get_tdp_table(
			le16_to_cpu(power_tune_table_v2->usLoadLineResistance);
	} else {
		power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table;
		tdp_table->usMaximumPowerDeliveryLimit = power_tune_table_v3->usSocketPowerLimit;
		tdp_table->usTDC = power_tune_table_v3->usTdcLimit;
		tdp_table->usEDCLimit = power_tune_table_v3->usEdcLimit;
		tdp_table->usSoftwareShutdownTemp = power_tune_table_v3->usSoftwareShutdownTemp;
		tdp_table->usTemperatureLimitTedge = power_tune_table_v3->usTemperatureLimitTedge;
		tdp_table->usTemperatureLimitHotspot = power_tune_table_v3->usTemperatureLimitHotSpot;
		tdp_table->usTemperatureLimitLiquid1 = power_tune_table_v3->usTemperatureLimitLiquid1;
		tdp_table->usTemperatureLimitLiquid2 = power_tune_table_v3->usTemperatureLimitLiquid2;
		tdp_table->usTemperatureLimitHBM = power_tune_table_v3->usTemperatureLimitHBM;
		tdp_table->usTemperatureLimitVrVddc = power_tune_table_v3->usTemperatureLimitVrSoc;
		tdp_table->usTemperatureLimitVrMvdd = power_tune_table_v3->usTemperatureLimitVrMem;
		tdp_table->usTemperatureLimitPlx = power_tune_table_v3->usTemperatureLimitPlx;
		tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v3->usSocketPowerLimit);
		tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit);
		tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v3->usEdcLimit);
		tdp_table->usSoftwareShutdownTemp = le16_to_cpu(power_tune_table_v3->usSoftwareShutdownTemp);
		tdp_table->usTemperatureLimitTedge = le16_to_cpu(power_tune_table_v3->usTemperatureLimitTedge);
		tdp_table->usTemperatureLimitHotspot = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHotSpot);
		tdp_table->usTemperatureLimitLiquid1 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid1);
		tdp_table->usTemperatureLimitLiquid2 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid2);
		tdp_table->usTemperatureLimitHBM = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHBM);
		tdp_table->usTemperatureLimitVrVddc = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrSoc);
		tdp_table->usTemperatureLimitVrMvdd = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrMem);
		tdp_table->usTemperatureLimitPlx = le16_to_cpu(power_tune_table_v3->usTemperatureLimitPlx);
		tdp_table->ucLiquid1_I2C_address = power_tune_table_v3->ucLiquid1_I2C_address;
		tdp_table->ucLiquid2_I2C_address = power_tune_table_v3->ucLiquid2_I2C_address;
		tdp_table->usBoostStartTemperature = power_tune_table_v3->usBoostStartTemperature;
		tdp_table->usBoostStopTemperature = power_tune_table_v3->usBoostStopTemperature;
		tdp_table->ulBoostClock = power_tune_table_v3->ulBoostClock;
		tdp_table->usBoostStartTemperature = le16_to_cpu(power_tune_table_v3->usBoostStartTemperature);
		tdp_table->usBoostStopTemperature = le16_to_cpu(power_tune_table_v3->usBoostStopTemperature);
		tdp_table->ulBoostClock = le32_to_cpu(power_tune_table_v3->ulBoostClock);

		get_scl_sda_value(power_tune_table_v3->ucLiquid_I2C_Line, &scl, &sda);

@ -312,6 +312,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
	int result = 0;

	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
	    speed == 0 ||
	    (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
	    (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
		return -1;
@ -322,9 +323,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
	if (!result) {
		crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
		tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
		WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
				REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
					CG_TACH_STATUS, TACH_PERIOD,
		WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
				REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
					CG_TACH_CTRL, TARGET_PERIOD,
					tach_period));
	}
	return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);

@ -99,50 +99,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
	return 0;
}

static int copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT;

	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++)
		table[i] = pptable_array[i];

	*pptable_info_array = table;

	return 0;
}

static int copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT;

	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++)
		table[i] = pptable_array[i];

	*pptable_info_array = table;

	return 0;
}

static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable)
{
	struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table;
@ -250,14 +206,22 @@ static int init_powerplay_table_information(

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);

	if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX)
	if (le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]) > VEGA12_ENGINECLOCK_HARDMAX)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX;
	else
		hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX];
	hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX];
		hwmgr->platform_descriptor.overdriveLimit.engineClock =
			le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]);
	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
		le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]);

	copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax);
	copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin);
	phm_copy_overdrive_settings_limits_array(hwmgr,
			&pptable_information->od_settings_max,
			powerplay_table->ODSettingsMax,
			ATOM_VEGA12_ODSETTING_COUNT);
	phm_copy_overdrive_settings_limits_array(hwmgr,
			&pptable_information->od_settings_min,
			powerplay_table->ODSettingsMin,
			ATOM_VEGA12_ODSETTING_COUNT);

	/* hwmgr->platformDescriptor.minOverdriveVDDC = 0;
	hwmgr->platformDescriptor.maxOverdriveVDDC = 0;
@ -267,15 +231,15 @@ static int init_powerplay_table_information(
		&& hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport);

	pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
	pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
	pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
	pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
	pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
	pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
	pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
	pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
	pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
	pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);

	pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
	pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);

	hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE];
	hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE]);

	disable_power_control = 0;
	if (!disable_power_control) {
@ -285,8 +249,8 @@ static int init_powerplay_table_information(
			PHM_PlatformCaps_PowerControl);
	}

	copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax);
	copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin);
	phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT);
	phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT);

	pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
	if (pptable_information->smc_pptable == NULL)

@ -46,6 +46,9 @@
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"

static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
@ -1474,6 +1477,19 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
	return 0;
}

static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_SetMGpuFanBoostLimitRpm);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableMgpuFan] Failed to enable mgpu fan boost!",
			return result);

	return 0;
}

static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
@ -1544,6 +1560,14 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
			"[EnableDPMTasks] Failed to populate umdpstate clocks!",
			return result);

	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
			POWER_SOURCE_AC << 16);
	PP_ASSERT_WITH_CODE(!result,
			"[GetPptLimit] get default PPT limit failed!",
			return result);
	hwmgr->power_limit =
		hwmgr->default_power_limit = smum_get_argument(hwmgr);

	return 0;
}

@ -1907,6 +1931,8 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
		void *value, int *size)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t val_vid;
	int ret = 0;

	switch (idx) {
@ -1941,6 +1967,13 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
		*size = 16;
		ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
		*((uint32_t *)value) =
			(uint32_t)convert_to_vddc((uint8_t)val_vid);
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		if (!ret)
@ -2269,6 +2302,25 @@ static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
	return AMD_FAN_CTRL_AUTO;
}

static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega20_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
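
vega20_set_fan_control_mode() gives each target mode a sane starting point: 'none' pins the fan at 100%, while 'manual' stops SMC fan control before the caller programs a speed. Illustrative sequence (the call site is hypothetical; users normally reach this via the hwmon pwm1_enable interface):

	vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_MANUAL);
	vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 50);
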
@ -3165,7 +3217,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
			"[GetPowerProfile] Failed to get activity monitor!",
			return result);

	size += sprintf(buf + size, "%2d(%14s%s)\n",
	size += sprintf(buf + size, "%2d %14s%s:\n",
		i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");

	size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
@ -3432,15 +3484,25 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
	.disable_smc_firmware_ctf =
		vega20_thermal_disable_alert,
	/* fan control related */
	.get_fan_speed_percent =
		vega20_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent =
		vega20_fan_ctrl_set_fan_speed_percent,
	.get_fan_speed_info =
		vega20_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_rpm =
		vega20_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm =
		vega20_fan_ctrl_set_fan_speed_rpm,
	.get_fan_control_mode =
		vega20_get_fan_control_mode,
	.set_fan_control_mode =
		vega20_set_fan_control_mode,
	/* smu memory related */
	.notify_cac_buffer_info =
		vega20_notify_cac_buffer_info,
	.enable_mgpu_fan_boost =
		vega20_enable_mgpu_fan_boost,
};

int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)

@ -661,50 +661,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
	return 0;
}

static int copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = pptable_array[i];

	*pptable_info_array = table;

	return 0;
}

static int copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = pptable_array[i];

	*pptable_info_array = table;

	return 0;
}

static int copy_overdrive_feature_capabilities_array(
	struct pp_hwmgr *hwmgr,
	uint8_t **pptable_info_array,
@ -721,7 +677,7 @@ static int copy_overdrive_feature_capabilities_array(
		return -ENOMEM;

	for (i = 0; i < od_feature_count; i++) {
		table[i] = pptable_array[i];
		table[i] = le32_to_cpu(pptable_array[i]);
		if (table[i])
			od_supported = true;
	}
@ -834,6 +790,8 @@ static int init_powerplay_table_information(

	hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
	pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
	hwmgr->thermal_controller.fanInfo.ulMinRPM = 0;
	hwmgr->thermal_controller.fanInfo.ulMaxRPM = powerplay_table->smcPPTable.FanMaximumRpm;

	set_hw_cap(hwmgr,
			ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
@ -842,34 +800,40 @@ static int init_powerplay_table_information(
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);

	if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
		od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ?
			ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount;
		od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ?
			ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount;
		od_feature_count =
			(le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
			 ATOM_VEGA20_ODFEATURE_COUNT) ?
			ATOM_VEGA20_ODFEATURE_COUNT :
			le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
		od_setting_count =
			(le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
			 ATOM_VEGA20_ODSETTING_COUNT) ?
			ATOM_VEGA20_ODSETTING_COUNT :
			le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);

		copy_overdrive_feature_capabilities_array(hwmgr,
				&pptable_information->od_feature_capabilities,
				powerplay_table->OverDrive8Table.ODFeatureCapabilities,
				od_feature_count);
		copy_overdrive_settings_limits_array(hwmgr,
		phm_copy_overdrive_settings_limits_array(hwmgr,
				&pptable_information->od_settings_max,
				powerplay_table->OverDrive8Table.ODSettingsMax,
				od_setting_count);
		copy_overdrive_settings_limits_array(hwmgr,
		phm_copy_overdrive_settings_limits_array(hwmgr,
				&pptable_information->od_settings_min,
				powerplay_table->OverDrive8Table.ODSettingsMin,
				od_setting_count);
	}

	pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1;
	pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2;
	pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit;
	pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit;
	pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit;
	pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1);
	pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2);
	pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit);
	pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit);
	pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit);

	pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
	pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp);

	hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE];
	hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);

	disable_power_control = 0;
	if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit)
@ -877,13 +841,16 @@ static int init_powerplay_table_information(
|
|||
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl);
|
||||
|
||||
if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) {
|
||||
power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ?
|
||||
ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount;
|
||||
copy_clock_limits_array(hwmgr,
|
||||
power_saving_clock_count =
|
||||
(le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount) >=
|
||||
ATOM_VEGA20_PPCLOCK_COUNT) ?
|
||||
ATOM_VEGA20_PPCLOCK_COUNT :
|
||||
le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount);
|
||||
phm_copy_clock_limits_array(hwmgr,
|
||||
&pptable_information->power_saving_clock_max,
|
||||
powerplay_table->PowerSavingClockTable.PowerSavingClockMax,
|
||||
power_saving_clock_count);
|
||||
copy_clock_limits_array(hwmgr,
|
||||
phm_copy_clock_limits_array(hwmgr,
|
||||
&pptable_information->power_saving_clock_min,
|
||||
powerplay_table->PowerSavingClockTable.PowerSavingClockMin,
|
||||
power_saving_clock_count);
|
||||
|
|
|
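The recurring change in the hunks above is byte-order handling: the powerplay table is parsed straight out of a little-endian VBIOS blob, so every multi-byte field needs le16_to_cpu()/le32_to_cpu() before use, or big-endian hosts read byte-swapped values. A minimal sketch of the pattern, with a hypothetical table layout rather than the driver's real one:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_table_example {		/* hypothetical little-endian blob layout */
	__le16 power_limit;
	__le32 setting_max;
};

static u32 example_read_setting_max(const struct fw_table_example *t)
{
	/* le32_to_cpu() is a no-op on little-endian hosts and a byte
	 * swap on big-endian ones, so the same code is correct on both. */
	return le32_to_cpu(t->setting_max);
}
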
@@ -29,6 +29,78 @@
 #include "soc15_common.h"
 #include "pp_debug.h"
 
+static int vega20_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data = hwmgr->backend;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+		ret = vega20_enable_smc_features(
+				hwmgr, false,
+				data->smu_features[GNLD_FAN_CONTROL].
+				smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Disable FAN CONTROL feature Failed!",
+				return ret);
+		data->smu_features[GNLD_FAN_CONTROL].enabled = false;
+	}
+
+	return ret;
+}
+
+int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data = hwmgr->backend;
+
+	if (data->smu_features[GNLD_FAN_CONTROL].supported)
+		return vega20_disable_fan_control_feature(hwmgr);
+
+	return 0;
+}
+
+static int vega20_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data = hwmgr->backend;
+	int ret = 0;
+
+	if (data->smu_features[GNLD_FAN_CONTROL].supported) {
+		ret = vega20_enable_smc_features(
+				hwmgr, true,
+				data->smu_features[GNLD_FAN_CONTROL].
+				smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Enable FAN CONTROL feature Failed!",
+				return ret);
+		data->smu_features[GNLD_FAN_CONTROL].enabled = true;
+	}
+
+	return ret;
+}
+
+int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data = hwmgr->backend;
+
+	if (data->smu_features[GNLD_FAN_CONTROL].supported)
+		return vega20_enable_fan_control_feature(hwmgr);
+
+	return 0;
+}
+
+static int vega20_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+				CG_FDO_CTRL2, TMIN, 0));
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+				CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+	return 0;
+}
+
 static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 {
 	int ret = 0;
@@ -42,12 +114,62 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
 	return 0;
 }
 
+int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t *speed)
+{
+	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	uint32_t current_rpm, percent = 0;
+	int ret = 0;
+
+	ret = vega20_get_current_rpm(hwmgr, &current_rpm);
+	if (ret)
+		return ret;
+
+	percent = current_rpm * 100 / pp_table->FanMaximumRpm;
+
+	*speed = percent > 100 ? 100 : percent;
+
+	return 0;
+}
+
+int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t speed)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t duty100;
+	uint32_t duty;
+	uint64_t tmp64;
+
+	if (speed > 100)
+		speed = 100;
+
+	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+		vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+				CG_FDO_CTRL1, FMAX_DUTY100);
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (uint64_t)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (uint32_t)tmp64;
+
+	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+				CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+	return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
 int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info)
 {
 	memset(fan_speed_info, 0, sizeof(*fan_speed_info));
-	fan_speed_info->supports_percent_read = false;
-	fan_speed_info->supports_percent_write = false;
+	fan_speed_info->supports_percent_read = true;
+	fan_speed_info->supports_percent_write = true;
 	fan_speed_info->supports_rpm_read = true;
 	fan_speed_info->supports_rpm_write = true;
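Both percent paths above reduce to scaling against a hardware reference: reads divide the measured RPM by the table's FanMaximumRpm, and writes scale the requested percentage into the FMAX_DUTY100 duty range, using a 64-bit intermediate so the multiply cannot overflow 32 bits. A standalone sketch of the write-side math (a plain division stands in for the kernel's do_div()):

#include <stdint.h>

static uint32_t percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp64;

	if (speed > 100)		/* clamp, as the driver does */
		speed = 100;
	tmp64 = (uint64_t)speed * duty100;
	return (uint32_t)(tmp64 / 100);	/* do_div(tmp64, 100) in kernel code */
}
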
@@ -61,6 +183,31 @@ int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 	return vega20_get_current_rpm(hwmgr, speed);
 }
 
+int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+	uint32_t tach_period, crystal_clock_freq;
+	int result = 0;
+
+	if (!speed)
+		return -EINVAL;
+
+	if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
+		result = vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
+		if (result)
+			return result;
+	}
+
+	crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+				CG_TACH_CTRL, TARGET_PERIOD,
+				tach_period));
+
+	return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
+}
+
 /**
  * Reads the remote temperature from the SIslands thermal controller.
  *

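The RPM write path programs CG_TACH_CTRL with a target period derived from the reference clock. A standalone sketch of that conversion; the unit assumptions (xclk reported in 10 kHz steps, 8 tach pulses per fan revolution) are inferred from the matching SMU7 fan code, not stated in this diff:

#include <stdint.h>

static uint32_t rpm_to_tach_period(uint32_t xclk, uint32_t rpm)
{
	if (rpm == 0)			/* the driver rejects speed == 0 */
		return 0;
	return 60 * xclk * 10000 / (8 * rpm);
}
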
@@ -50,15 +50,22 @@ struct vega20_temperature {
 #define FDO_PWM_MODE_STATIC_RPM 5
 
 extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
 extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info);
 extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
 extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
 		uint32_t *speed);
+extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
+		uint32_t speed);
+extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t *speed);
+extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+		uint32_t speed);
+extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
 extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr);
 extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
 		struct PP_TemperatureRange *range);
+extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
 
 #endif

@@ -328,6 +328,8 @@ struct pp_hwmgr_func {
 	int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
 	int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
 	int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
+	int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
+	int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_table_func {
@@ -732,7 +734,6 @@ struct pp_hwmgr {
 	void *smu_backend;
 	const struct pp_smumgr_func *smumgr_funcs;
 	bool is_kicker;
-	bool reload_fw;
 
 	enum PP_DAL_POWERLEVEL dal_power_level;
 	struct phm_dynamic_state_info dyn_state;

@@ -268,6 +268,12 @@ typedef enum {
 	PPCLK_COUNT,
 } PPCLK_e;
 
+typedef enum {
+	POWER_SOURCE_AC,
+	POWER_SOURCE_DC,
+	POWER_SOURCE_COUNT,
+} POWER_SOURCE_e;
+
 typedef enum {
 	VOLTAGE_MODE_AVFS = 0,
 	VOLTAGE_MODE_AVFS_SS,

@@ -2269,11 +2269,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
 		}
+		break;
 	}
 	pr_debug("can't get the offset of type %x member %x\n", type, member);
 	return 0;

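The one-line additions in these *_get_offsetof() hunks (here and in the fiji/iceland/tonga/vegam files below) all plug the same hole: C switch cases fall through, so a type that matched the first case but no inner member would drop into the next type's handling instead of reaching the error path. A compilable demonstration of the bug class, not the driver code:

#include <stdio.h>

static const char *lookup(int type, int member)
{
	switch (type) {
	case 0:
		switch (member) {
		case 0:
			return "type0/member0";
		}
		break;	/* without this, an unmatched member falls
			 * through into the case 1 handling below */
	case 1:
		return "type1";
	}
	return "unknown";
}

int main(void)
{
	/* with the break this prints "unknown"; without it, "type1" */
	printf("%s\n", lookup(0, 5));
	return 0;
}
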
@@ -302,16 +302,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
 			hwmgr->avfs_supported = false;
 		}
 
-	/* To initialize all clock gating before RLC loaded and running.*/
-	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-			AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
-	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-			AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
-	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-			AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
-	amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-			AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
-
 	/* Setup SoftRegsStart here for register lookup in case
 	 * DummyBackEnd is used and ProcessFirmwareHeader is not executed
 	 */
@@ -2331,6 +2321,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case UvdBootLevel:
@@ -2340,6 +2331,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
+		break;
 	}
 	pr_warn("can't get the offset of type %x member %x\n", type, member);
 	return 0;

@@ -232,26 +232,25 @@ static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
 
 static int iceland_start_smu(struct pp_hwmgr *hwmgr)
 {
+	struct iceland_smumgr *priv = hwmgr->smu_backend;
 	int result;
 
 	result = iceland_smu_upload_firmware_image(hwmgr);
 	if (result)
 		return result;
 	result = iceland_smu_start_smc(hwmgr);
 	if (result)
 		return result;
 
 	if (!smu7_is_smc_ram_running(hwmgr)) {
 		pr_info("smu not running, upload firmware again \n");
 		result = iceland_smu_upload_firmware_image(hwmgr);
 		if (result)
 			return result;
-
-		result = iceland_smu_start_smc(hwmgr);
-		if (result)
-			return result;
+		iceland_smu_start_smc(hwmgr);
 	}
 
+	/* Setup SoftRegsStart here to visit the register UcodeLoadStatus
+	 * to check fw loading state
+	 */
+	smu7_read_smc_sram_dword(hwmgr,
+			SMU71_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU71_Firmware_Header, SoftRegisters),
+			&(priv->smu7_data.soft_regs_start), 0x40000);
+
 	result = smu7_request_smu_load_fw(hwmgr);
 
 	return result;
@@ -2237,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
+		break;
 	}
 	pr_warn("can't get the offset of type %x member %x\n", type, member);
 	return 0;
@@ -2662,7 +2663,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
 	.smu_fini = &smu7_smu_fini,
 	.start_smu = &iceland_start_smu,
 	.check_fw_load_finish = &smu7_check_fw_load_finish,
-	.request_smu_load_fw = &smu7_reload_firmware,
+	.request_smu_load_fw = &smu7_request_smu_load_fw,
 	.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
 	.send_msg_to_smc = &smu7_send_msg_to_smc,
 	.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,

@@ -186,40 +186,12 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-/* sdma is disabled by default in vbios, need to re-enable in driver */
-static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr)
-{
-	smu10_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PowerUpSdma);
-}
-
-static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr)
-{
-	smu10_send_msg_to_smc(hwmgr,
-			PPSMC_MSG_PowerDownSdma);
-}
-
-/* vcn is disabled by default in vbios, need to re-enable in driver */
-static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr)
-{
-	smu10_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_PowerUpVcn, 0);
-}
-
-static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr)
-{
-	smu10_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_PowerDownVcn, 0);
-}
-
 static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	struct smu10_smumgr *priv =
 			(struct smu10_smumgr *)(hwmgr->smu_backend);
 
 	if (priv) {
-		smu10_smc_disable_sdma(hwmgr);
-		smu10_smc_disable_vcn(hwmgr);
 		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
 				&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
 				&priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -243,8 +215,7 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
 
 	if (smu10_verify_smc_interface(hwmgr))
 		return -EINVAL;
-	smu10_smc_enable_sdma(hwmgr);
-	smu10_smc_enable_vcn(hwmgr);
 
 	return 0;
 }

@@ -302,44 +302,6 @@ int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_
 	return 0;
 }
 
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-
-static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
-{
-	uint32_t result = 0;
-
-	switch (fw_type) {
-	case UCODE_ID_SDMA0:
-		result = UCODE_ID_SDMA0_MASK;
-		break;
-	case UCODE_ID_SDMA1:
-		result = UCODE_ID_SDMA1_MASK;
-		break;
-	case UCODE_ID_CP_CE:
-		result = UCODE_ID_CP_CE_MASK;
-		break;
-	case UCODE_ID_CP_PFP:
-		result = UCODE_ID_CP_PFP_MASK;
-		break;
-	case UCODE_ID_CP_ME:
-		result = UCODE_ID_CP_ME_MASK;
-		break;
-	case UCODE_ID_CP_MEC:
-	case UCODE_ID_CP_MEC_JT1:
-	case UCODE_ID_CP_MEC_JT2:
-		result = UCODE_ID_CP_MEC_MASK;
-		break;
-	case UCODE_ID_RLC_G:
-		result = UCODE_ID_RLC_G_MASK;
-		break;
-	default:
-		pr_info("UCode type is out of range! \n");
-		result = 0;
-	}
-
-	return result;
-}
-
 static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
 		uint32_t fw_type,
 		struct SMU_Entry *entry)
@@ -381,10 +343,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	uint32_t fw_to_load;
 	int r = 0;
 
-	if (!hwmgr->reload_fw) {
-		pr_info("skip reloading...\n");
-		return 0;
-	}
+	amdgpu_ucode_init_bo(hwmgr->adev);
 
 	if (smu_data->soft_regs_start)
 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -467,10 +426,13 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
 	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
 
-	if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
-		pr_err("Fail to Request SMU Load uCode");
+	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
 
-	return r;
+	r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
+	if (!r)
+		return 0;
+
+	pr_err("SMU load firmware failed\n");
 
 failed:
 	kfree(smu_data->toc);
@@ -482,13 +444,12 @@ failed:
 int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
 {
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
-	uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
 	uint32_t ret;
 
 	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
 			smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
 			SMU_SoftRegisters, UcodeLoadStatus),
-			fw_mask, fw_mask);
+			fw_type, fw_type);
 	return ret;
 }

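The rework above drops the firmware-type-to-mask translation and waits on UcodeLoadStatus with the caller's bits directly. Judging from the call site, phm_wait_on_indirect_register(..., value, mask) polls until the masked register equals the value, so passing the same bits for both amounts to waiting until every requested firmware bit reads back as set. A sketch of the predicate being polled (names illustrative, not the driver's register API):

#include <stdbool.h>
#include <stdint.h>

static bool fw_load_done(uint32_t ucode_load_status, uint32_t requested)
{
	/* true once all requested firmware-loaded bits are set */
	return (ucode_load_status & requested) == requested;
}
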
@@ -658,11 +658,10 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 {
 	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
 	uint32_t smc_address;
+	uint32_t fw_to_check = 0;
+	int ret;
 
-	if (!hwmgr->reload_fw) {
-		pr_info("skip reloading...\n");
-		return 0;
-	}
+	amdgpu_ucode_init_bo(hwmgr->adev);
 
 	smu8_smu_populate_firmware_entries(hwmgr);
 
@@ -689,15 +688,39 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
 				smu8_smu->toc_entry_power_profiling_index);
 
-	return smu8_send_msg_to_smc_with_parameter(hwmgr,
+	smu8_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_ExecuteJob,
 					smu8_smu->toc_entry_initialize_index);
+
+	fw_to_check = UCODE_ID_RLC_G_MASK |
+			UCODE_ID_SDMA0_MASK |
+			UCODE_ID_SDMA1_MASK |
+			UCODE_ID_CP_CE_MASK |
+			UCODE_ID_CP_ME_MASK |
+			UCODE_ID_CP_PFP_MASK |
+			UCODE_ID_CP_MEC_JT1_MASK |
+			UCODE_ID_CP_MEC_JT2_MASK;
+
+	if (hwmgr->chip_id == CHIP_STONEY)
+		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
+	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
+	if (ret) {
+		pr_err("SMU firmware load failed\n");
+		return ret;
+	}
+
+	ret = smu8_load_mec_firmware(hwmgr);
+	if (ret) {
+		pr_err("Mec Firmware load failed\n");
+		return ret;
+	}
+
+	return 0;
 }
 
 static int smu8_start_smu(struct pp_hwmgr *hwmgr)
 {
-	int ret = 0;
-	uint32_t fw_to_check = 0;
 	struct amdgpu_device *adev = hwmgr->adev;
 
 	uint32_t index = SMN_MP1_SRAM_START_ADDR +
@@ -712,31 +735,7 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
 	adev->pm.fw_version = hwmgr->smu_version >> 8;
 
-	fw_to_check = UCODE_ID_RLC_G_MASK |
-			UCODE_ID_SDMA0_MASK |
-			UCODE_ID_SDMA1_MASK |
-			UCODE_ID_CP_CE_MASK |
-			UCODE_ID_CP_ME_MASK |
-			UCODE_ID_CP_PFP_MASK |
-			UCODE_ID_CP_MEC_JT1_MASK |
-			UCODE_ID_CP_MEC_JT2_MASK;
-
-	if (hwmgr->chip_id == CHIP_STONEY)
-		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
-
-	smu8_request_smu_load_fw(hwmgr);
-
-	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
-	if (ret) {
-		pr_err("SMU firmware load failed\n");
-		return ret;
-	}
-
-	ret = smu8_load_mec_firmware(hwmgr);
-	if (ret)
-		pr_err("Mec Firmware load failed\n");
-
-	return ret;
+	return smu8_request_smu_load_fw(hwmgr);
 }
 
 static int smu8_smu_init(struct pp_hwmgr *hwmgr)

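smu8 now builds the load-completion mask in the same function that requests the load, clearing the bits Stoney hardware lacks (a single SDMA engine and a single MEC jump table). The masking idiom in isolation, with placeholder bit values rather than the real UCODE_ID_*_MASK constants:

#include <stdint.h>

#define FW_RLC_G	(1u << 0)	/* placeholder positions, not the */
#define FW_SDMA0	(1u << 1)	/* driver's UCODE_ID_*_MASK values */
#define FW_SDMA1	(1u << 2)
#define FW_MEC_JT2	(1u << 3)

static uint32_t fw_mask_for_chip(int is_stoney)
{
	uint32_t mask = FW_RLC_G | FW_SDMA0 | FW_SDMA1 | FW_MEC_JT2;

	if (is_stoney)			/* drop engines the chip lacks */
		mask &= ~(FW_SDMA1 | FW_MEC_JT2);
	return mask;
}
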
@@ -192,6 +192,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
 
 static int tonga_start_smu(struct pp_hwmgr *hwmgr)
 {
+	struct tonga_smumgr *priv = hwmgr->smu_backend;
 	int result;
 
 	/* Only start SMC if SMC RAM is not running */
@@ -209,6 +210,14 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
 		}
 	}
 
+	/* Setup SoftRegsStart here to visit the register UcodeLoadStatus
+	 * to check fw loading state
+	 */
+	smu7_read_smc_sram_dword(hwmgr,
+			SMU72_FIRMWARE_HEADER_LOCATION +
+			offsetof(SMU72_Firmware_Header, SoftRegisters),
+			&(priv->smu7_data.soft_regs_start), 0x40000);
+
 	result = smu7_request_smu_load_fw(hwmgr);
 
 	return result;
@@ -2619,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case UvdBootLevel:
@@ -2628,6 +2638,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
+		break;
 	}
 	pr_warn("can't get the offset of type %x member %x\n", type, member);
 	return 0;

@@ -2185,6 +2185,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case UvdBootLevel:
@@ -2194,6 +2195,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
+		break;
 	}
 	pr_warn("can't get the offset of type %x member %x\n", type, member);
 	return 0;

@@ -2416,7 +2416,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			size = radeon_get_ib_value(p, idx+1+(i*8)+1);
 			if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
 				/* force size to size of the buffer */
-				dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+				dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
 				ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
 			}

@@ -467,8 +467,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 	struct dma_fence *fence;
 	struct drm_sched_rq *rq;
 
-	if (!spsc_queue_count(&entity->job_queue) == 0 ||
-	    entity->num_rq_list <= 1)
+	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
 		return;
 
 	fence = READ_ONCE(entity->last_scheduled);

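The scheduler change is about precedence rather than behavior: `!` binds tighter than `==`, so `!spsc_queue_count(...) == 0` parsed as `(!count) == 0`, which is simply `count != 0`; the rewritten condition states the same test legibly. A tiny standalone check of that identity:

#include <assert.h>

int main(void)
{
	for (int count = 0; count < 3; count++)
		assert((!count == 0) == (count != 0));
	return 0;
}
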
@@ -247,20 +247,6 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
-static void ttm_list_move_bulk_tail(struct list_head *list,
-				    struct list_head *first,
-				    struct list_head *last)
-{
-	first->prev->next = last->next;
-	last->next->prev = first->prev;
-
-	list->prev->next = first;
-	first->prev = list->prev;
-
-	last->next = list;
-	list->prev = last;
-}
-
 void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 {
 	unsigned i;
@@ -276,8 +262,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		reservation_object_assert_held(pos->last->resv);
 
 		man = &pos->first->bdev->man[TTM_PL_TT];
-		ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru,
-					&pos->last->lru);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
 	}
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
@@ -291,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		reservation_object_assert_held(pos->last->resv);
 
 		man = &pos->first->bdev->man[TTM_PL_VRAM];
-		ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru,
-					&pos->last->lru);
+		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
+				    &pos->last->lru);
 	}
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
@@ -306,8 +292,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		reservation_object_assert_held(pos->last->resv);
 
 		lru = &pos->first->bdev->glob->swap_lru[i];
-		ttm_list_move_bulk_tail(lru, &pos->first->swap,
-					&pos->last->swap);
+		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);

@@ -183,6 +183,29 @@ static inline void list_move_tail(struct list_head *list,
 	list_add_tail(list, head);
 }
 
+/**
+ * list_bulk_move_tail - move a subsection of a list to its tail
+ * @head: the head that will follow our entry
+ * @first: first entry to move
+ * @last: last entry to move, can be the same as first
+ *
+ * Move all entries between @first and including @last before @head.
+ * All three entries must belong to the same linked list.
+ */
+static inline void list_bulk_move_tail(struct list_head *head,
+				       struct list_head *first,
+				       struct list_head *last)
+{
+	first->prev->next = last->next;
+	last->next->prev = first->prev;
+
+	head->prev->next = first;
+	first->prev = head->prev;
+
+	last->next = head;
+	head->prev = last;
+}
+
 /**
  * list_is_last - tests whether @list is the last entry in list @head
  * @list: the entry to test
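Since list_bulk_move_tail() is the new list.h helper the TTM change builds on, here is a self-contained userspace sketch of its O(1) splice semantics (a reimplementation for illustration, not the kernel header itself):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail_demo(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void bulk_move_tail(struct list_head *head, struct list_head *first,
			   struct list_head *last)
{
	first->prev->next = last->next;	/* unlink [first, last] */
	last->next->prev = first->prev;

	head->prev->next = first;	/* splice it back in before head */
	first->prev = head->prev;

	last->next = head;
	head->prev = last;
}

int main(void)
{
	struct list_head head, n[4];
	struct list_head *p;
	int i;

	list_init(&head);
	for (i = 0; i < 4; i++)
		list_add_tail_demo(&n[i], &head);

	/* move the n[0]..n[1] subsection to the tail: prints n2 n3 n0 n1 */
	bulk_move_tail(&head, &n[0], &n[1]);

	for (p = head.next; p != &head; p = p->next)
		printf("n%d ", (int)(p - n));
	printf("\n");
	return 0;
}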