Merge tag 'amd-drm-next-5.9-2020-07-24' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.9-2020-07-24: amdgpu: - Misc sienna cichlid fixes - Final bits of swSMU cleanup - Misc display fixes - Misc VCN fixes - Eeprom i2c cleanup - Drop amd vrr_range debugfs in favor of core drm Signed-off-by: Dave Airlie <airlied@redhat.com> From: Alex Deucher <alexdeucher@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/20200724205712.3913-1-alexander.deucher@amd.com
This commit is contained in:
commit
92be423922
|
@ -2036,3 +2036,20 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
|
||||
uint32_t table,
|
||||
uint16_t *size,
|
||||
uint8_t *frev,
|
||||
uint8_t *crev,
|
||||
uint8_t **addr)
|
||||
{
|
||||
uint16_t data_start;
|
||||
|
||||
if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
|
||||
size, frev, crev, &data_start))
|
||||
return -EINVAL;
|
||||
|
||||
*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -216,6 +216,13 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
|
|||
u8 voltage_type,
|
||||
u8 *svd_gpio_id, u8 *svc_gpio_id);
|
||||
|
||||
int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
|
||||
uint32_t table,
|
||||
uint16_t *size,
|
||||
uint8_t *frev,
|
||||
uint8_t *crev,
|
||||
uint8_t **addr);
|
||||
|
||||
void amdgpu_atombios_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_atombios_init(struct amdgpu_device *adev);
|
||||
|
||||
|
|
|
@ -1073,6 +1073,57 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
|
||||
*
|
||||
* @f: open file handle
|
||||
* @buf: User buffer to store read data in
|
||||
* @size: Number of bytes to read
|
||||
* @pos: Offset to seek to
|
||||
*/
|
||||
static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
struct amdgpu_device *adev = file_inode(f)->i_private;
|
||||
ssize_t result = 0;
|
||||
int r;
|
||||
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
r = pm_runtime_get_sync(adev->ddev->dev);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
while (size) {
|
||||
uint32_t value;
|
||||
|
||||
r = amdgpu_get_gfx_off_status(adev, &value);
|
||||
if (r) {
|
||||
pm_runtime_mark_last_busy(adev->ddev->dev);
|
||||
pm_runtime_put_autosuspend(adev->ddev->dev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = put_user(value, (uint32_t *)buf);
|
||||
if (r) {
|
||||
pm_runtime_mark_last_busy(adev->ddev->dev);
|
||||
pm_runtime_put_autosuspend(adev->ddev->dev);
|
||||
return r;
|
||||
}
|
||||
|
||||
result += 4;
|
||||
buf += 4;
|
||||
*pos += 4;
|
||||
size -= 4;
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(adev->ddev->dev);
|
||||
pm_runtime_put_autosuspend(adev->ddev->dev);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static const struct file_operations amdgpu_debugfs_regs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = amdgpu_debugfs_regs_read,
|
||||
|
@ -1123,7 +1174,9 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
|
|||
|
||||
static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = amdgpu_debugfs_gfxoff_read,
|
||||
.write = amdgpu_debugfs_gfxoff_write,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations *debugfs_regs[] = {
|
||||
|
|
|
@ -425,6 +425,7 @@ struct amdgpu_pm {
|
|||
u32 default_sclk;
|
||||
u32 default_mclk;
|
||||
struct amdgpu_i2c_chan *i2c_bus;
|
||||
bool bus_locked;
|
||||
/* internal thermal controller on rv6xx+ */
|
||||
enum amdgpu_int_thermal_type int_thermal_type;
|
||||
struct device *int_hwmon_dev;
|
||||
|
|
|
@ -1186,7 +1186,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
|
|||
* unfortunately we can't detect certain
|
||||
* hypervisors so just do this all the time.
|
||||
*/
|
||||
adev->mp1_state = PP_MP1_STATE_UNLOAD;
|
||||
if (!amdgpu_passthrough(adev))
|
||||
adev->mp1_state = PP_MP1_STATE_UNLOAD;
|
||||
amdgpu_device_ip_suspend(adev);
|
||||
adev->mp1_state = PP_MP1_STATE_NONE;
|
||||
}
|
||||
|
|
|
@ -578,6 +578,20 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
|
|||
mutex_unlock(&adev->gfx.gfx_off_mutex);
|
||||
}
|
||||
|
||||
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
|
||||
{
|
||||
|
||||
int r = 0;
|
||||
|
||||
mutex_lock(&adev->gfx.gfx_off_mutex);
|
||||
|
||||
r = smu_get_status_gfxoff(adev, value);
|
||||
|
||||
mutex_unlock(&adev->gfx.gfx_off_mutex);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
|
|
@ -378,6 +378,7 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
|
|||
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
|
||||
int pipe, int queue);
|
||||
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
|
||||
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
|
||||
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
|
||||
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
|
|
|
@ -796,8 +796,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
|||
tmp_str++;
|
||||
while (isspace(*++tmp_str));
|
||||
|
||||
while (tmp_str[0]) {
|
||||
sub_str = strsep(&tmp_str, delimiter);
|
||||
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
|
||||
ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
@ -1067,8 +1066,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
|
|||
memcpy(buf_cpy, buf, bytes);
|
||||
buf_cpy[bytes] = '\0';
|
||||
tmp = buf_cpy;
|
||||
while (tmp[0]) {
|
||||
sub_str = strsep(&tmp, delimiter);
|
||||
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
|
||||
if (strlen(sub_str)) {
|
||||
ret = kstrtol(sub_str, 0, &level);
|
||||
if (ret)
|
||||
|
@ -1697,8 +1695,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
|
|||
i++;
|
||||
memcpy(buf_cpy, buf, count-i);
|
||||
tmp_str = buf_cpy;
|
||||
while (tmp_str[0]) {
|
||||
sub_str = strsep(&tmp_str, delimiter);
|
||||
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
|
||||
ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -500,7 +500,6 @@ static int psp_asd_load(struct psp_context *psp)
|
|||
* TODO: add version check to make it common
|
||||
*/
|
||||
if (amdgpu_sriov_vf(psp->adev) ||
|
||||
(psp->adev->asic_type == CHIP_SIENNA_CICHLID) ||
|
||||
(psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -47,7 +47,6 @@ struct amdgpu_ras_eeprom_control {
|
|||
uint32_t next_addr;
|
||||
unsigned int num_recs;
|
||||
struct mutex tbl_mutex;
|
||||
bool bus_locked;
|
||||
uint32_t tbl_byte_sum;
|
||||
uint16_t i2c_address; // 8-bit represented address
|
||||
};
|
||||
|
|
|
@ -60,7 +60,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
|
|||
|
||||
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned long bo_size, fw_shared_bo_size;
|
||||
unsigned long bo_size;
|
||||
const char *fw_name;
|
||||
const struct common_firmware_header *hdr;
|
||||
unsigned char fw_check;
|
||||
|
@ -176,6 +176,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
|||
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
|
||||
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
|
||||
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
|
||||
bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
|
||||
if (adev->vcn.harvest_config & (1 << i))
|
||||
|
@ -189,6 +190,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
|||
return r;
|
||||
}
|
||||
|
||||
adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
|
||||
bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
|
||||
adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
|
||||
bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
|
||||
|
||||
if (adev->vcn.indirect_sram) {
|
||||
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
|
||||
|
@ -198,17 +204,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
|||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
|
||||
&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
|
||||
adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -224,11 +219,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
|
|||
if (adev->vcn.harvest_config & (1 << j))
|
||||
continue;
|
||||
|
||||
kvfree(adev->vcn.inst[j].saved_shm_bo);
|
||||
amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
|
||||
&adev->vcn.inst[j].fw_shared_gpu_addr,
|
||||
(void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
|
||||
|
||||
if (adev->vcn.indirect_sram) {
|
||||
amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
|
||||
&adev->vcn.inst[j].dpg_sram_gpu_addr,
|
||||
|
@ -274,17 +264,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
|
|||
return -ENOMEM;
|
||||
|
||||
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
|
||||
|
||||
if (adev->vcn.inst[i].fw_shared_bo == NULL)
|
||||
return 0;
|
||||
|
||||
if (!adev->vcn.inst[i].saved_shm_bo)
|
||||
return -ENOMEM;
|
||||
|
||||
size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
|
||||
ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
|
||||
|
||||
memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -322,17 +301,6 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
|
|||
}
|
||||
memset_io(ptr, 0, size);
|
||||
}
|
||||
|
||||
if (adev->vcn.inst[i].fw_shared_bo == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
|
||||
ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
|
||||
|
||||
if (adev->vcn.inst[i].saved_shm_bo != NULL)
|
||||
memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
|
||||
else
|
||||
memset_io(ptr, 0, size);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -199,7 +199,6 @@ struct amdgpu_vcn_inst {
|
|||
struct amdgpu_irq_src irq;
|
||||
struct amdgpu_vcn_reg external;
|
||||
struct amdgpu_bo *dpg_sram_bo;
|
||||
struct amdgpu_bo *fw_shared_bo;
|
||||
struct dpg_pause_state pause_state;
|
||||
void *dpg_sram_cpu_addr;
|
||||
uint64_t dpg_sram_gpu_addr;
|
||||
|
@ -207,7 +206,6 @@ struct amdgpu_vcn_inst {
|
|||
atomic_t dpg_enc_submission_cnt;
|
||||
void *fw_shared_cpu_addr;
|
||||
uint64_t fw_shared_gpu_addr;
|
||||
void *saved_shm_bo;
|
||||
};
|
||||
|
||||
struct amdgpu_vcn {
|
||||
|
|
|
@ -39,6 +39,48 @@
|
|||
|
||||
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @threshold: threshold to trigger the wptr reporting
|
||||
* @timeout: timeout to trigger the wptr reporting
|
||||
* @enabled: Enable/disable timeout flush mechanism
|
||||
*
|
||||
* threshold input range: 0 ~ 15, default 0,
|
||||
* real_threshold = 2^threshold
|
||||
* timeout input range: 0 ~ 20, default 8,
|
||||
* real_timeout = (2^timeout) * 1024 / (socclk_freq)
|
||||
*
|
||||
* Force update wptr for self interrupt ( >= SIENNA_CICHLID).
|
||||
*/
|
||||
static void
|
||||
force_update_wptr_for_self_int(struct amdgpu_device *adev,
|
||||
u32 threshold, u32 timeout, bool enabled)
|
||||
{
|
||||
u32 ih_cntl, ih_rb_cntl;
|
||||
|
||||
if (adev->asic_type < CHIP_SIENNA_CICHLID)
|
||||
return;
|
||||
|
||||
ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
|
||||
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
|
||||
|
||||
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
|
||||
SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
|
||||
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
|
||||
SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
|
||||
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
|
||||
RB_USED_INT_THRESHOLD, threshold);
|
||||
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
|
||||
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
|
||||
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
|
||||
RB_USED_INT_THRESHOLD, threshold);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
|
||||
}
|
||||
|
||||
/**
|
||||
* navi10_ih_enable_interrupts - Enable the interrupt ring buffer
|
||||
*
|
||||
|
@ -371,6 +413,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
|
|||
|
||||
/* enable interrupts */
|
||||
navi10_ih_enable_interrupts(adev);
|
||||
/* enable wptr force update for self int */
|
||||
force_update_wptr_for_self_int(adev, 0, 8, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -384,6 +428,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
|
|||
*/
|
||||
static void navi10_ih_irq_disable(struct amdgpu_device *adev)
|
||||
{
|
||||
force_update_wptr_for_self_int(adev, 0, 8, false);
|
||||
navi10_ih_disable_interrupts(adev);
|
||||
|
||||
/* Wait and acknowledge irq */
|
||||
|
|
|
@ -446,6 +446,9 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
|
|||
adev->nbio.funcs = &nbio_v2_3_funcs;
|
||||
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
|
||||
|
||||
if (adev->asic_type == CHIP_SIENNA_CICHLID)
|
||||
adev->gmc.xgmi.supported = true;
|
||||
|
||||
/* Set IP register base before any HW register access */
|
||||
r = nv_reg_base_init(adev);
|
||||
if (r)
|
||||
|
|
|
@ -56,7 +56,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
|
|||
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
|
||||
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
|
||||
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/sienna_cichlid_asd.bin");
|
||||
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
|
||||
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
|
||||
|
||||
|
@ -179,6 +179,10 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
|
|||
}
|
||||
break;
|
||||
case CHIP_SIENNA_CICHLID:
|
||||
err = psp_init_ta_microcode(&adev->psp, chip_name);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
case CHIP_NAVY_FLOUNDER:
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include "amdgpu_amdkfd.h"
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/pci.h>
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
/* error codes */
|
||||
#define I2C_OK 0
|
||||
|
@ -537,12 +536,12 @@ Fail:
|
|||
return false;
|
||||
}
|
||||
|
||||
/***************************** EEPROM I2C GLUE ****************************/
|
||||
/***************************** I2C GLUE ****************************/
|
||||
|
||||
static uint32_t smu_v11_0_i2c_eeprom_read_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
static uint32_t smu_v11_0_i2c_read_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
{
|
||||
uint32_t ret = 0;
|
||||
|
||||
|
@ -562,10 +561,10 @@ Fail:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
|
@ -592,14 +591,13 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
|
|||
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(i2c);
|
||||
struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
|
||||
|
||||
if (!smu_v11_0_i2c_bus_lock(i2c)) {
|
||||
DRM_ERROR("Failed to lock the bus from SMU");
|
||||
return;
|
||||
}
|
||||
|
||||
control->bus_locked = true;
|
||||
adev->pm.bus_locked = true;
|
||||
}
|
||||
|
||||
static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
|
||||
|
@ -611,14 +609,13 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
|
|||
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(i2c);
|
||||
struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
|
||||
|
||||
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
|
||||
DRM_ERROR("Failed to unlock the bus from SMU");
|
||||
return;
|
||||
}
|
||||
|
||||
control->bus_locked = false;
|
||||
adev->pm.bus_locked = false;
|
||||
}
|
||||
|
||||
static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
|
||||
|
@ -627,14 +624,13 @@ static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
|
|||
.unlock_bus = unlock_bus,
|
||||
};
|
||||
|
||||
static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
static int smu_v11_0_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
int i, ret;
|
||||
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
|
||||
struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
|
||||
|
||||
if (!control->bus_locked) {
|
||||
if (!adev->pm.bus_locked) {
|
||||
DRM_ERROR("I2C bus unlocked, stopping transaction!");
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -643,13 +639,13 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
|
||||
for (i = 0; i < num; i++) {
|
||||
if (msgs[i].flags & I2C_M_RD)
|
||||
ret = smu_v11_0_i2c_eeprom_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
msgs[i].buf, msgs[i].len);
|
||||
ret = smu_v11_0_i2c_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
msgs[i].buf, msgs[i].len);
|
||||
else
|
||||
ret = smu_v11_0_i2c_eeprom_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
msgs[i].buf, msgs[i].len);
|
||||
ret = smu_v11_0_i2c_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
msgs[i].buf, msgs[i].len);
|
||||
|
||||
if (ret != I2C_OK) {
|
||||
num = -EIO;
|
||||
|
@ -661,18 +657,18 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
return num;
|
||||
}
|
||||
|
||||
static u32 smu_v11_0_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
|
||||
static u32 smu_v11_0_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
|
||||
static const struct i2c_algorithm smu_v11_0_i2c_eeprom_i2c_algo = {
|
||||
.master_xfer = smu_v11_0_i2c_eeprom_i2c_xfer,
|
||||
.functionality = smu_v11_0_i2c_eeprom_i2c_func,
|
||||
static const struct i2c_algorithm smu_v11_0_i2c_algo = {
|
||||
.master_xfer = smu_v11_0_i2c_xfer,
|
||||
.functionality = smu_v11_0_i2c_func,
|
||||
};
|
||||
|
||||
int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
|
||||
int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
@ -680,8 +676,8 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
|
|||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
|
||||
snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
|
||||
control->algo = &smu_v11_0_i2c_algo;
|
||||
snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
|
||||
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
|
||||
|
||||
res = i2c_add_adapter(control);
|
||||
|
@ -691,7 +687,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
|
|||
return res;
|
||||
}
|
||||
|
||||
void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control)
|
||||
void smu_v11_0_i2c_control_fini(struct i2c_adapter *control)
|
||||
{
|
||||
i2c_del_adapter(control);
|
||||
}
|
||||
|
@ -719,9 +715,9 @@ bool smu_v11_0_i2c_test_bus(struct i2c_adapter *control)
|
|||
smu_v11_0_i2c_init(control);
|
||||
|
||||
/* Write 0xde to address 0x0000 on the EEPROM */
|
||||
ret = smu_v11_0_i2c_eeprom_write_data(control, I2C_TARGET_ADDR, data, 6);
|
||||
ret = smu_v11_0_i2c_write_data(control, I2C_TARGET_ADDR, data, 6);
|
||||
|
||||
ret = smu_v11_0_i2c_eeprom_read_data(control, I2C_TARGET_ADDR, data, 6);
|
||||
ret = smu_v11_0_i2c_read_data(control, I2C_TARGET_ADDR, data, 6);
|
||||
|
||||
smu_v11_0_i2c_fini(control);
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
|
||||
struct i2c_adapter;
|
||||
|
||||
int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control);
|
||||
void smu_v11_0_i2c_eeprom_control_fini(struct i2c_adapter *control);
|
||||
int smu_v11_0_i2c_control_init(struct i2c_adapter *control);
|
||||
void smu_v11_0_i2c_control_fini(struct i2c_adapter *control);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1376,7 +1376,7 @@ static int dm_late_init(void *handle)
|
|||
unsigned int linear_lut[16];
|
||||
int i;
|
||||
struct dmcu *dmcu = NULL;
|
||||
bool ret;
|
||||
bool ret = true;
|
||||
|
||||
if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
|
||||
return detect_mst_link_for_all_connectors(adev->ddev);
|
||||
|
@ -1397,7 +1397,14 @@ static int dm_late_init(void *handle)
|
|||
*/
|
||||
params.min_abm_backlight = 0x28F;
|
||||
|
||||
ret = dmcu_load_iram(dmcu, params);
|
||||
/* In the case where abm is implemented on dmcub,
|
||||
* dmcu object will be null.
|
||||
* ABM 2.4 and up are implemented on dmcub.
|
||||
*/
|
||||
if (dmcu)
|
||||
ret = dmcu_load_iram(dmcu, params);
|
||||
else if (adev->dm.dc->ctx->dmub_srv)
|
||||
ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
|
||||
|
||||
if (!ret)
|
||||
return -EINVAL;
|
||||
|
@ -1486,23 +1493,12 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
/* pass data to smu controller */
|
||||
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
|
||||
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
|
||||
ret = smu_write_watermarks_table(smu);
|
||||
|
||||
if (ret) {
|
||||
mutex_unlock(&smu->mutex);
|
||||
DRM_ERROR("Failed to update WMTABLE!\n");
|
||||
return ret;
|
||||
}
|
||||
smu->watermarks_bitmap |= WATERMARKS_LOADED;
|
||||
ret = smu_write_watermarks_table(smu);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to update WMTABLE!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -4546,7 +4542,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
|
|||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
|
||||
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
|
||||
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
|
||||
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
|
||||
&dsc_caps);
|
||||
#endif
|
||||
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
|
||||
|
@ -6235,7 +6231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
|
|||
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
|
||||
dc_is_dmcu_initialized(adev->dm.dc)) {
|
||||
(dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
|
||||
drm_object_attach_property(&aconnector->base.base,
|
||||
adev->mode_info.abm_level_property, 0);
|
||||
}
|
||||
|
@ -8471,7 +8467,7 @@ cleanup:
|
|||
*out_type = update_type;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
|
@ -8494,6 +8490,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
|
|||
|
||||
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
|
||||
|
@ -8547,6 +8544,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|||
if (ret)
|
||||
goto fail;
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
if (adev->asic_type >= CHIP_NAVI10) {
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
||||
|
@ -8556,7 +8554,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
|
||||
!new_crtc_state->color_mgmt_changed &&
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
#include "amdgpu_dm_debugfs.h"
|
||||
#include "dm_helpers.h"
|
||||
#include "dmub/dmub_srv.h"
|
||||
#include "resource.h"
|
||||
#include "dsc.h"
|
||||
|
||||
struct dmub_debugfs_trace_header {
|
||||
uint32_t entry_count;
|
||||
|
@ -817,24 +819,6 @@ unlock:
|
|||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the min and max vrr vfreq through the connector's debugfs file.
|
||||
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
|
||||
*/
|
||||
static int vrr_range_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_connector *connector = m->private;
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
|
||||
if (connector->status != connector_status_connected)
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
|
||||
seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
/*
|
||||
* Returns the HDCP capability of the Display (1.4 for now).
|
||||
|
@ -995,14 +979,517 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
|
|||
return read_size - r;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 10;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_clock_en);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_slice_width);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_slice_height);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_bytes_per_pixel);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_pic_width);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_pic_height);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_chunk_size);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *pos)
|
||||
{
|
||||
char *rd_buf = NULL;
|
||||
char *rd_buf_ptr = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
|
||||
struct display_stream_compressor *dsc;
|
||||
struct dcn_dsc_state dsc_state = {0};
|
||||
const uint32_t rd_buf_size = 100;
|
||||
struct pipe_ctx *pipe_ctx;
|
||||
ssize_t result = 0;
|
||||
int i, r, str_len = 30;
|
||||
|
||||
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
|
||||
|
||||
if (!rd_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
rd_buf_ptr = rd_buf;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
|
||||
if (pipe_ctx && pipe_ctx->stream &&
|
||||
pipe_ctx->stream->link == aconnector->dc_link)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pipe_ctx)
|
||||
return -ENXIO;
|
||||
|
||||
dsc = pipe_ctx->stream_res.dsc;
|
||||
if (dsc)
|
||||
dsc->funcs->dsc_read_state(dsc, &dsc_state);
|
||||
|
||||
snprintf(rd_buf_ptr, str_len,
|
||||
"%d\n",
|
||||
dsc_state.dsc_slice_bpg_offset);
|
||||
rd_buf_ptr += str_len;
|
||||
|
||||
while (size) {
|
||||
if (*pos >= rd_buf_size)
|
||||
break;
|
||||
|
||||
r = put_user(*(rd_buf + result), buf);
|
||||
if (r)
|
||||
return r; /* r = -EFAULT */
|
||||
|
||||
buf += 1;
|
||||
size -= 1;
|
||||
*pos += 1;
|
||||
result += 1;
|
||||
}
|
||||
|
||||
kfree(rd_buf);
|
||||
return result;
|
||||
}
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
|
||||
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
|
||||
DEFINE_SHOW_ATTRIBUTE(output_bpc);
|
||||
DEFINE_SHOW_ATTRIBUTE(vrr_range);
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
|
||||
#endif
|
||||
|
||||
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_clock_en_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_slice_width_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_slice_height_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_bytes_per_pixel_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_pic_width_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_pic_height_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_chunk_size_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_dsc_slice_bpg_offset_read,
|
||||
.llseek = default_llseek
|
||||
};
|
||||
|
||||
static const struct file_operations dp_link_settings_debugfs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.read = dp_link_settings_read,
|
||||
|
@ -1055,14 +1542,21 @@ static const struct {
|
|||
{"link_settings", &dp_link_settings_debugfs_fops},
|
||||
{"phy_settings", &dp_phy_settings_debugfs_fop},
|
||||
{"test_pattern", &dp_phy_test_pattern_fops},
|
||||
{"vrr_range", &vrr_range_fops},
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
|
||||
#endif
|
||||
{"sdp_message", &sdp_message_fops},
|
||||
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
|
||||
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
|
||||
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
|
||||
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
|
||||
{"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
|
||||
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
|
||||
{"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
|
||||
{"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
|
||||
{"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
|
||||
{"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
|
||||
{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
|
||||
{"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_HDCP
|
||||
|
|
|
@ -806,7 +806,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
|
|||
stream = dc_state->streams[i];
|
||||
|
||||
if (stream->timing.flags.DSC == 1)
|
||||
dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
|
||||
dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
|
@ -1104,10 +1104,6 @@ static inline enum link_training_result perform_link_training_int(
|
|||
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
|
||||
dpcd_set_training_pattern(link, dpcd_pattern);
|
||||
|
||||
/* delay 5ms after notifying sink of idle pattern before switching output */
|
||||
if (link->connector_signal != SIGNAL_TYPE_EDP)
|
||||
msleep(5);
|
||||
|
||||
/* 4. mainlink output idle pattern*/
|
||||
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
|
||||
|
||||
|
@ -3523,8 +3519,8 @@ static bool retrieve_link_cap(struct dc_link *link)
|
|||
status = core_link_read_dpcd(
|
||||
link,
|
||||
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
|
||||
link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
|
||||
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
|
||||
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
|
||||
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
|
||||
}
|
||||
|
||||
if (!dpcd_read_sink_ext_caps(link))
|
||||
|
|
|
@ -244,6 +244,25 @@ struct dc_stream_status *dc_stream_get_status(
|
|||
return dc_stream_get_status_from_state(dc->current_state, stream);
|
||||
}
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
/**
|
||||
* dc_optimize_timing() - dc to optimize timing
|
||||
*/
|
||||
bool dc_optimize_timing(
|
||||
struct dc_crtc_timing *timing,
|
||||
unsigned int max_input_rate_in_khz)
|
||||
{
|
||||
//optimization is expected to assing a value to these:
|
||||
//timing->pix_clk_100hz
|
||||
//timing->v_front_porch
|
||||
//timing->v_total
|
||||
//timing->fast_transport_output_rate_100hz;
|
||||
timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
|
||||
|
@ -655,6 +674,17 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
|
|||
return true;
|
||||
}
|
||||
|
||||
enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
|
||||
struct dc_state *state,
|
||||
struct dc_stream_state *stream)
|
||||
{
|
||||
if (dc->res_pool->funcs->add_dsc_to_stream_resource) {
|
||||
return dc->res_pool->funcs->add_dsc_to_stream_resource(dc, state, stream);
|
||||
} else {
|
||||
return DC_NO_DSC_RESOURCE;
|
||||
}
|
||||
}
|
||||
|
||||
void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
|
||||
{
|
||||
DC_LOG_DC(
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
#include "inc/hw/dmcu.h"
|
||||
#include "dml/display_mode_lib.h"
|
||||
|
||||
#define DC_VER "3.2.94"
|
||||
#define DC_VER "3.2.95"
|
||||
|
||||
#define MAX_SURFACES 3
|
||||
#define MAX_PLANES 6
|
||||
|
@ -96,6 +96,9 @@ struct dc_plane_cap {
|
|||
uint32_t nv12;
|
||||
uint32_t fp16;
|
||||
} max_downscale_factor;
|
||||
// minimal width/height
|
||||
uint32_t min_width;
|
||||
uint32_t min_height;
|
||||
};
|
||||
|
||||
// Color management caps (DPP and MPC)
|
||||
|
|
|
@ -726,7 +726,7 @@ union dpcd_dsc_basic_capabilities {
|
|||
uint8_t raw[16];
|
||||
};
|
||||
|
||||
union dpcd_dsc_ext_capabilities {
|
||||
union dpcd_dsc_branch_decoder_capabilities {
|
||||
struct {
|
||||
uint8_t BRANCH_OVERALL_THROUGHPUT_0;
|
||||
uint8_t BRANCH_OVERALL_THROUGHPUT_1;
|
||||
|
@ -737,7 +737,7 @@ union dpcd_dsc_ext_capabilities {
|
|||
|
||||
struct dpcd_dsc_capabilities {
|
||||
union dpcd_dsc_basic_capabilities dsc_basic_caps;
|
||||
union dpcd_dsc_ext_capabilities dsc_ext_caps;
|
||||
union dpcd_dsc_branch_decoder_capabilities dsc_branch_decoder_caps;
|
||||
};
|
||||
|
||||
/* These parameters are from PSR capabilities reported by Sink DPCD */
|
||||
|
|
|
@ -713,6 +713,9 @@ struct dc_crtc_timing_flags {
|
|||
uint32_t LTE_340MCSC_SCRAMBLE:1;
|
||||
|
||||
uint32_t DSC : 1; /* Use DSC with this timing */
|
||||
#ifndef TRIM_FSFT
|
||||
uint32_t FAST_TRANSPORT: 1;
|
||||
#endif
|
||||
};
|
||||
|
||||
enum dc_timing_3d_format {
|
||||
|
@ -772,6 +775,10 @@ struct dc_crtc_timing {
|
|||
enum dc_aspect_ratio aspect_ratio;
|
||||
enum scanning_type scan_type;
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
uint32_t fast_transport_output_rate_100hz;
|
||||
#endif
|
||||
|
||||
struct dc_crtc_timing_flags flags;
|
||||
struct dc_dsc_config dsc_cfg;
|
||||
};
|
||||
|
|
|
@ -363,6 +363,10 @@ bool dc_stream_remove_writeback(struct dc *dc,
|
|||
struct dc_stream_state *stream,
|
||||
uint32_t dwb_pipe_inst);
|
||||
|
||||
enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
|
||||
struct dc_state *state,
|
||||
struct dc_stream_state *stream);
|
||||
|
||||
bool dc_stream_warmup_writeback(struct dc *dc,
|
||||
int num_dwb,
|
||||
struct dc_writeback_info *wb_info);
|
||||
|
@ -419,6 +423,12 @@ struct dc_stream_status *dc_stream_get_status_from_state(
|
|||
struct dc_stream_status *dc_stream_get_status(
|
||||
struct dc_stream_state *dc_stream);
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
bool dc_optimize_timing(
|
||||
struct dc_crtc_timing *timing,
|
||||
unsigned int max_input_rate_in_khz);
|
||||
#endif
|
||||
|
||||
/*******************************************************************************
|
||||
* Cursor interfaces - To manages the cursor within a stream
|
||||
******************************************************************************/
|
||||
|
|
|
@ -423,7 +423,9 @@ static const struct dc_plane_cap plane_cap = {
|
|||
.argb8888 = 250,
|
||||
.nv12 = 1,
|
||||
.fp16 = 1
|
||||
}
|
||||
},
|
||||
64,
|
||||
64
|
||||
};
|
||||
|
||||
static const struct dc_plane_cap underlay_plane_cap = {
|
||||
|
@ -447,7 +449,9 @@ static const struct dc_plane_cap underlay_plane_cap = {
|
|||
.argb8888 = 1,
|
||||
.nv12 = 250,
|
||||
.fp16 = 1
|
||||
}
|
||||
},
|
||||
64,
|
||||
64
|
||||
};
|
||||
|
||||
#define CTX ctx
|
||||
|
|
|
@ -424,7 +424,9 @@ static const struct dc_plane_cap plane_cap = {
|
|||
.argb8888 = 250,
|
||||
.nv12 = 1,
|
||||
.fp16 = 1
|
||||
}
|
||||
},
|
||||
64,
|
||||
64
|
||||
};
|
||||
|
||||
#define CTX ctx
|
||||
|
|
|
@ -157,6 +157,11 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
|
|||
REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
|
||||
REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
|
||||
REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
|
||||
REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
|
||||
REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
|
||||
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
|
||||
REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
|
||||
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1043,7 +1043,9 @@ static const struct dc_plane_cap plane_cap = {
|
|||
.argb8888 = 250,
|
||||
.nv12 = 250,
|
||||
.fp16 = 1
|
||||
}
|
||||
},
|
||||
16,
|
||||
16
|
||||
};
|
||||
static const struct resource_caps res_cap_nv14 = {
|
||||
.num_timing_generator = 5,
|
||||
|
@ -3364,6 +3366,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
|
|||
.validate_bandwidth = dcn20_validate_bandwidth,
|
||||
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
|
||||
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
|
||||
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
|
||||
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
|
||||
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
|
||||
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
|
||||
|
|
|
@ -152,11 +152,11 @@ static void enc2_stream_encoder_update_hdmi_info_packets(
|
|||
|
||||
/*Always add mandatory packets first followed by optional ones*/
|
||||
enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
|
||||
enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
|
||||
enc2_update_hdmi_info_packet(enc1, 1, &info_frame->hfvsif);
|
||||
enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
|
||||
enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
|
||||
enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
|
||||
enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
|
||||
enc2_update_hdmi_info_packet(enc1, 3, &info_frame->vendor);
|
||||
enc2_update_hdmi_info_packet(enc1, 4, &info_frame->spd);
|
||||
enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hdrsmd);
|
||||
}
|
||||
|
||||
static void enc2_stream_encoder_stop_hdmi_info_packets(
|
||||
|
|
|
@ -857,7 +857,9 @@ static const struct dc_plane_cap plane_cap = {
|
|||
.argb8888 = 250,
|
||||
.nv12 = 250,
|
||||
.fp16 = 250
|
||||
}
|
||||
},
|
||||
64,
|
||||
64
|
||||
};
|
||||
|
||||
static const struct dc_debug_options debug_defaults_drv = {
|
||||
|
@ -1759,6 +1761,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
|
|||
.validate_bandwidth = dcn21_validate_bandwidth,
|
||||
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
|
||||
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
|
||||
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
|
||||
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
|
||||
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
|
||||
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
|
||||
|
|
|
@ -747,7 +747,7 @@ done:
|
|||
return is_dsc_possible;
|
||||
}
|
||||
|
||||
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps)
|
||||
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_branch_decoder_caps, struct dsc_dec_dpcd_caps *dsc_sink_caps)
|
||||
{
|
||||
if (!dpcd_dsc_basic_data)
|
||||
return false;
|
||||
|
@ -818,14 +818,14 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
|
|||
}
|
||||
|
||||
/* Extended caps */
|
||||
if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. because it doesn't apply to SST
|
||||
if (dpcd_dsc_branch_decoder_caps == NULL) { // branch decoder DPCD DSC data can be null for non branch device
|
||||
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
|
||||
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
|
||||
dsc_sink_caps->branch_max_line_width = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
|
||||
dsc_sink_caps->branch_overall_throughput_0_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
|
||||
if (dsc_sink_caps->branch_overall_throughput_0_mps == 0)
|
||||
dsc_sink_caps->branch_overall_throughput_0_mps = 0;
|
||||
else if (dsc_sink_caps->branch_overall_throughput_0_mps == 1)
|
||||
|
@ -835,7 +835,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
|
|||
dsc_sink_caps->branch_overall_throughput_0_mps += 600;
|
||||
}
|
||||
|
||||
dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_ext_data[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
|
||||
dsc_sink_caps->branch_overall_throughput_1_mps = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
|
||||
if (dsc_sink_caps->branch_overall_throughput_1_mps == 0)
|
||||
dsc_sink_caps->branch_overall_throughput_1_mps = 0;
|
||||
else if (dsc_sink_caps->branch_overall_throughput_1_mps == 1)
|
||||
|
@ -845,7 +845,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da
|
|||
dsc_sink_caps->branch_overall_throughput_1_mps += 600;
|
||||
}
|
||||
|
||||
dsc_sink_caps->branch_max_line_width = dpcd_dsc_ext_data[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
|
||||
dsc_sink_caps->branch_max_line_width = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320;
|
||||
ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120);
|
||||
|
||||
return true;
|
||||
|
|
|
@ -165,7 +165,9 @@ struct resource_funcs {
|
|||
struct dc_3dlut **lut,
|
||||
struct dc_transfer_func **shaper);
|
||||
#endif
|
||||
|
||||
enum dc_status (*add_dsc_to_stream_resource)(
|
||||
struct dc *dc, struct dc_state *state,
|
||||
struct dc_stream_state *stream);
|
||||
};
|
||||
|
||||
struct audio_support{
|
||||
|
|
|
@ -56,6 +56,11 @@ struct dcn_dsc_state {
|
|||
uint32_t dsc_clock_en;
|
||||
uint32_t dsc_slice_width;
|
||||
uint32_t dsc_bytes_per_pixel;
|
||||
uint32_t dsc_slice_height;
|
||||
uint32_t dsc_pic_width;
|
||||
uint32_t dsc_pic_height;
|
||||
uint32_t dsc_slice_bpg_offset;
|
||||
uint32_t dsc_chunk_size;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -36,10 +36,10 @@
|
|||
|
||||
/* Firmware versioning. */
|
||||
#ifdef DMUB_EXPOSE_VERSION
|
||||
#define DMUB_FW_VERSION_GIT_HASH 0xf675c6448
|
||||
#define DMUB_FW_VERSION_GIT_HASH 0xe6d590b09
|
||||
#define DMUB_FW_VERSION_MAJOR 0
|
||||
#define DMUB_FW_VERSION_MINOR 0
|
||||
#define DMUB_FW_VERSION_REVISION 24
|
||||
#define DMUB_FW_VERSION_REVISION 25
|
||||
#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION)
|
||||
#endif
|
||||
|
||||
|
|
|
@ -760,9 +760,35 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
|
|||
|
||||
infopacket->valid = true;
|
||||
}
|
||||
#ifndef TRIM_FSFT
|
||||
static void build_vrr_infopacket_fast_transport_data(
|
||||
bool ftActive,
|
||||
unsigned int ftOutputRate,
|
||||
struct dc_info_packet *infopacket)
|
||||
{
|
||||
/* PB9 : bit7 - fast transport Active*/
|
||||
unsigned char activeBit = (ftActive) ? 1 << 7 : 0;
|
||||
|
||||
infopacket->sb[1] &= ~activeBit; //clear bit
|
||||
infopacket->sb[1] |= activeBit; //set bit
|
||||
|
||||
/* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */
|
||||
infopacket->sb[13] = ftOutputRate & 0xFF;
|
||||
|
||||
/* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */
|
||||
infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;
|
||||
|
||||
/* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */
|
||||
infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF;
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
static void build_vrr_infopacket_v3(enum signal_type signal,
|
||||
const struct mod_vrr_params *vrr,
|
||||
#ifndef TRIM_FSFT
|
||||
bool ftActive, unsigned int ftOutputRate,
|
||||
#endif
|
||||
enum color_transfer_func app_tf,
|
||||
struct dc_info_packet *infopacket)
|
||||
{
|
||||
|
@ -773,6 +799,13 @@ static void build_vrr_infopacket_v3(enum signal_type signal,
|
|||
|
||||
build_vrr_infopacket_fs2_data(app_tf, infopacket);
|
||||
|
||||
#ifndef TRIM_FSFT
|
||||
build_vrr_infopacket_fast_transport_data(
|
||||
ftActive,
|
||||
ftOutputRate,
|
||||
infopacket);
|
||||
#endif
|
||||
|
||||
build_vrr_infopacket_checksum(&payload_size, infopacket);
|
||||
|
||||
infopacket->valid = true;
|
||||
|
@ -795,7 +828,15 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
|
|||
|
||||
switch (packet_type) {
|
||||
case PACKET_TYPE_FS_V3:
|
||||
#ifndef TRIM_FSFT
|
||||
build_vrr_infopacket_v3(
|
||||
stream->signal, vrr,
|
||||
stream->timing.flags.FAST_TRANSPORT,
|
||||
stream->timing.fast_transport_output_rate_100hz,
|
||||
app_tf, infopacket);
|
||||
#else
|
||||
build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
|
||||
#endif
|
||||
break;
|
||||
case PACKET_TYPE_FS_V2:
|
||||
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
|
||||
|
|
|
@ -35,7 +35,9 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
|
|||
|
||||
include $(AMD_POWERPLAY)
|
||||
|
||||
POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o smu_v12_0.o arcturus_ppt.o navi10_ppt.o renoir_ppt.o sienna_cichlid_ppt.o
|
||||
POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \
|
||||
smu_v12_0.o arcturus_ppt.o navi10_ppt.o \
|
||||
renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o
|
||||
|
||||
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
|
||||
|
||||
|
|
|
@ -20,14 +20,14 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L1
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "smu_v11_0.h"
|
||||
#include "smu_v12_0.h"
|
||||
#include "atom.h"
|
||||
#include "arcturus_ppt.h"
|
||||
#include "navi10_ppt.h"
|
||||
|
@ -45,196 +45,47 @@
|
|||
#undef pr_info
|
||||
#undef pr_debug
|
||||
|
||||
#undef __SMU_DUMMY_MAP
|
||||
#define __SMU_DUMMY_MAP(type) #type
|
||||
static const char* __smu_message_names[] = {
|
||||
SMU_MESSAGE_TYPES
|
||||
};
|
||||
|
||||
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
|
||||
{
|
||||
if (type < 0 || type >= SMU_MSG_MAX_COUNT)
|
||||
return "unknown smu message";
|
||||
return __smu_message_names[type];
|
||||
}
|
||||
|
||||
#undef __SMU_DUMMY_MAP
|
||||
#define __SMU_DUMMY_MAP(fea) #fea
|
||||
static const char* __smu_feature_names[] = {
|
||||
SMU_FEATURE_MASKS
|
||||
};
|
||||
|
||||
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
|
||||
{
|
||||
if (feature < 0 || feature >= SMU_FEATURE_COUNT)
|
||||
return "unknown smu feature";
|
||||
return __smu_feature_names[feature];
|
||||
}
|
||||
|
||||
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
|
||||
{
|
||||
size_t size = 0;
|
||||
int ret = 0, i = 0;
|
||||
uint32_t feature_mask[2] = { 0 };
|
||||
int32_t feature_index = 0;
|
||||
uint32_t count = 0;
|
||||
uint32_t sort_feature[SMU_FEATURE_COUNT];
|
||||
uint64_t hw_feature_count = 0;
|
||||
|
||||
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
|
||||
if (ret)
|
||||
goto failed;
|
||||
size = smu_get_pp_feature_mask(smu, buf);
|
||||
|
||||
size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
|
||||
feature_mask[1], feature_mask[0]);
|
||||
|
||||
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
|
||||
feature_index = smu_feature_get_index(smu, i);
|
||||
if (feature_index < 0)
|
||||
continue;
|
||||
sort_feature[feature_index] = i;
|
||||
hw_feature_count++;
|
||||
}
|
||||
|
||||
for (i = 0; i < hw_feature_count; i++) {
|
||||
size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
|
||||
count++,
|
||||
smu_get_feature_name(smu, sort_feature[i]),
|
||||
i,
|
||||
!!smu_feature_is_enabled(smu, sort_feature[i]) ?
|
||||
"enabled" : "disabled");
|
||||
}
|
||||
|
||||
failed:
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static int smu_feature_update_enable_state(struct smu_context *smu,
|
||||
uint64_t feature_mask,
|
||||
bool enabled)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int ret = 0;
|
||||
|
||||
if (enabled) {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_EnableSmuFeaturesLow,
|
||||
lower_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_EnableSmuFeaturesHigh,
|
||||
upper_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_DisableSmuFeaturesLow,
|
||||
lower_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_DisableSmuFeaturesHigh,
|
||||
upper_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
if (enabled)
|
||||
bitmap_or(feature->enabled, feature->enabled,
|
||||
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
|
||||
else
|
||||
bitmap_andnot(feature->enabled, feature->enabled,
|
||||
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t feature_mask[2] = { 0 };
|
||||
uint64_t feature_2_enabled = 0;
|
||||
uint64_t feature_2_disabled = 0;
|
||||
uint64_t feature_enables = 0;
|
||||
|
||||
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = smu_set_pp_feature_mask(smu, new_mask);
|
||||
|
||||
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
|
||||
|
||||
feature_2_enabled = ~feature_enables & new_mask;
|
||||
feature_2_disabled = feature_enables & ~new_mask;
|
||||
|
||||
if (feature_2_enabled) {
|
||||
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
if (feature_2_disabled) {
|
||||
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
|
||||
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
|
||||
{
|
||||
int ret = 0;
|
||||
struct smu_context *smu = &adev->smu;
|
||||
|
||||
if (!if_version && !smu_version)
|
||||
return -EINVAL;
|
||||
|
||||
if (smu->smc_fw_if_version && smu->smc_fw_version)
|
||||
{
|
||||
if (if_version)
|
||||
*if_version = smu->smc_fw_if_version;
|
||||
|
||||
if (smu_version)
|
||||
*smu_version = smu->smc_fw_version;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (if_version) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smu->smc_fw_if_version = *if_version;
|
||||
}
|
||||
|
||||
if (smu_version) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smu->smc_fw_version = *smu_version;
|
||||
}
|
||||
if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
|
||||
*value = smu_get_gfx_off_status(smu);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -246,9 +97,6 @@ int smu_set_soft_freq_range(struct smu_context *smu,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_clk_dpm_is_enabled(smu, clk_type))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
if (smu->ppt_funcs->set_soft_freq_limited_range)
|
||||
|
@ -285,33 +133,6 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
|
|||
return ret;
|
||||
}
|
||||
|
||||
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
|
||||
{
|
||||
enum smu_feature_mask feature_id = 0;
|
||||
|
||||
switch (clk_type) {
|
||||
case SMU_MCLK:
|
||||
case SMU_UCLK:
|
||||
feature_id = SMU_FEATURE_DPM_UCLK_BIT;
|
||||
break;
|
||||
case SMU_GFXCLK:
|
||||
case SMU_SCLK:
|
||||
feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
|
||||
break;
|
||||
case SMU_SOCCLK:
|
||||
feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
|
||||
break;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
|
||||
if(!smu_feature_is_enabled(smu, feature_id)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
|
||||
*
|
||||
|
@ -386,45 +207,6 @@ int smu_get_power_num_states(struct smu_context *smu,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
|
||||
void *table_data, bool drv2smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
struct smu_table *table = &smu_table->driver_table;
|
||||
int table_id = smu_table_get_index(smu, table_index);
|
||||
uint32_t table_size;
|
||||
int ret = 0;
|
||||
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
table_size = smu_table->tables[table_index].size;
|
||||
|
||||
if (drv2smu) {
|
||||
memcpy(table->cpu_addr, table_data, table_size);
|
||||
/*
|
||||
* Flush hdp cache: to guard the content seen by
|
||||
* GPU is consitent with CPU.
|
||||
*/
|
||||
amdgpu_asic_flush_hdp(adev, NULL);
|
||||
}
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
|
||||
SMU_MSG_TransferTableDram2Smu :
|
||||
SMU_MSG_TransferTableSmu2Dram,
|
||||
table_id | ((argument & 0xFFFF) << 16),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!drv2smu) {
|
||||
amdgpu_asic_flush_hdp(adev, NULL);
|
||||
memcpy(table_data, table->cpu_addr, table_size);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool is_support_sw_smu(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->asic_type >= CHIP_ARCTURUS)
|
||||
|
@ -525,63 +307,6 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
int ret = 0;
|
||||
|
||||
if (smu->is_apu)
|
||||
return 1;
|
||||
feature_id = smu_feature_get_index(smu, mask);
|
||||
if (feature_id < 0)
|
||||
return 0;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
ret = test_bit(feature_id, feature->enabled);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
|
||||
bool enable)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
|
||||
feature_id = smu_feature_get_index(smu, mask);
|
||||
if (feature_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
return smu_feature_update_enable_state(smu,
|
||||
1ULL << feature_id,
|
||||
enable);
|
||||
}
|
||||
|
||||
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
int ret = 0;
|
||||
|
||||
feature_id = smu_feature_get_index(smu, mask);
|
||||
if (feature_id < 0)
|
||||
return 0;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
ret = test_bit(feature_id, feature->supported);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smu_set_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
struct smu_context *smu = &adev->smu;
|
||||
|
@ -676,22 +401,6 @@ static int smu_late_init(void *handle)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
|
||||
uint16_t *size, uint8_t *frev, uint8_t *crev,
|
||||
uint8_t **addr)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint16_t data_start;
|
||||
|
||||
if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
|
||||
size, frev, crev, &data_start))
|
||||
return -EINVAL;
|
||||
|
||||
*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smu_init_fb_allocations(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
@ -1135,7 +844,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
|
||||
ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1280,7 +989,6 @@ static int smu_hw_init(void *handle)
|
|||
static int smu_disable_dpms(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint64_t features_to_disable;
|
||||
int ret = 0;
|
||||
bool use_baco = !smu->is_apu &&
|
||||
((adev->in_gpu_reset &&
|
||||
|
@ -1316,11 +1024,8 @@ static int smu_disable_dpms(struct smu_context *smu)
|
|||
* BACO feature has to be kept enabled.
|
||||
*/
|
||||
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
|
||||
features_to_disable = U64_MAX &
|
||||
~(1ULL << smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT));
|
||||
ret = smu_feature_update_enable_state(smu,
|
||||
features_to_disable,
|
||||
0);
|
||||
ret = smu_disable_all_features_with_exception(smu,
|
||||
SMU_FEATURE_BACO_BIT);
|
||||
if (ret)
|
||||
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
|
||||
} else {
|
||||
|
@ -1341,7 +1046,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
|
|||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret = 0;
|
||||
|
||||
smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
|
||||
smu_i2c_fini(smu, &adev->pm.smu_i2c);
|
||||
|
||||
cancel_work_sync(&smu->throttling_logging_work);
|
||||
|
||||
|
@ -1884,12 +1589,6 @@ int smu_set_mp1_state(struct smu_context *smu,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* some asics may not support those messages */
|
||||
if (smu_msg_get_index(smu, msg) < 0) {
|
||||
mutex_unlock(&smu->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = smu_send_smc_msg(smu, msg, NULL);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
|
||||
|
@ -1944,35 +1643,34 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
|
|||
|
||||
int smu_write_watermarks_table(struct smu_context *smu)
|
||||
{
|
||||
void *watermarks_table = smu->smu_table.watermarks_table;
|
||||
int ret = 0;
|
||||
|
||||
if (!watermarks_table)
|
||||
return -EINVAL;
|
||||
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return smu_update_table(smu,
|
||||
SMU_TABLE_WATERMARKS,
|
||||
0,
|
||||
watermarks_table,
|
||||
true);
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
ret = smu_set_watermarks_table(smu, NULL);
|
||||
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
|
||||
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
|
||||
{
|
||||
void *table = smu->smu_table.watermarks_table;
|
||||
int ret = 0;
|
||||
|
||||
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!table)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
if (!smu->disable_watermark &&
|
||||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
|
||||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
smu_set_watermarks_table(smu, table, clock_ranges);
|
||||
ret = smu_set_watermarks_table(smu, clock_ranges);
|
||||
|
||||
if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
|
||||
smu->watermarks_bitmap |= WATERMARKS_EXIST;
|
||||
|
@ -1982,7 +1680,7 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
|
|||
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_set_ac_dc(struct smu_context *smu)
|
||||
|
@ -2216,8 +1914,14 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
|
|||
|
||||
mutex_lock(&smu->mutex);
|
||||
|
||||
if (smu->ppt_funcs->od_edit_dpm_table)
|
||||
if (smu->ppt_funcs->od_edit_dpm_table) {
|
||||
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
|
||||
if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
|
||||
ret = smu_handle_task(smu,
|
||||
smu->smu_dpm.dpm_level,
|
||||
AMD_PP_TASK_READJUST_POWER_STATE,
|
||||
false);
|
||||
}
|
||||
|
||||
mutex_unlock(&smu->mutex);
|
||||
|
||||
|
|
|
@ -21,12 +21,14 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L2
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "atomfirmware.h"
|
||||
#include "amdgpu_atomfirmware.h"
|
||||
#include "amdgpu_atombios.h"
|
||||
#include "smu_v11_0.h"
|
||||
#include "smu11_driver_if_arcturus.h"
|
||||
#include "soc15_common.h"
|
||||
|
@ -43,6 +45,7 @@
|
|||
#include <linux/i2c.h>
|
||||
#include <linux/pci.h>
|
||||
#include "amdgpu_ras.h"
|
||||
#include "smu_cmn.h"
|
||||
|
||||
/*
|
||||
* DO NOT use these for err/warn/info/debug messages.
|
||||
|
@ -56,8 +59,6 @@
|
|||
|
||||
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
|
||||
|
||||
#define MSG_MAP(msg, index, valid_in_vf) \
|
||||
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
|
||||
#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
|
||||
[smu_feature] = {1, (arcturus_feature)}
|
||||
|
||||
|
@ -78,7 +79,7 @@
|
|||
/* possible frequency drift (1Mhz) */
|
||||
#define EPSILON 1
|
||||
|
||||
static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
|
||||
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
|
||||
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
|
||||
|
@ -141,7 +142,7 @@ static struct smu_11_0_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
|
|||
MSG_MAP(ReadSerialNumBottom32, PPSMC_MSG_ReadSerialNumBottom32, 1),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
|
||||
static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
|
||||
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
|
||||
CLK_MAP(SCLK, PPCLK_GFXCLK),
|
||||
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
|
||||
|
@ -152,7 +153,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
|
|||
CLK_MAP(VCLK, PPCLK_VCLK),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
|
||||
static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = {
|
||||
FEA_MAP(DPM_PREFETCHER),
|
||||
FEA_MAP(DPM_GFXCLK),
|
||||
FEA_MAP(DPM_UCLK),
|
||||
|
@ -181,7 +182,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_CO
|
|||
FEA_MAP(TEMP_DEPENDENT_VMIN),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
|
||||
static const struct cmn2asic_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
|
||||
TAB_MAP(PPTABLE),
|
||||
TAB_MAP(AVFS),
|
||||
TAB_MAP(AVFS_PSM_DEBUG),
|
||||
|
@ -194,12 +195,12 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
|
|||
TAB_MAP(ACTIVITY_MONITOR_COEFF),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
|
||||
static const struct cmn2asic_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
|
||||
PWR_MAP(AC),
|
||||
PWR_MAP(DC),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
|
||||
static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
|
||||
|
@ -207,103 +208,10 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFI
|
|||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
|
||||
};
|
||||
|
||||
static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_msg_mapping mapping;
|
||||
|
||||
if (index >= SMU_MSG_MAX_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_message_map[index];
|
||||
if (!(mapping.valid_mapping))
|
||||
return -EINVAL;
|
||||
|
||||
if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
|
||||
return -EACCES;
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_CLK_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_clk_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
dev_warn(smc->adev->dev, "Unsupported SMU clk: %d\n", index);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_FEATURE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_feature_mask_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_TABLE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_table_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
dev_warn(smc->adev->dev, "Unsupported SMU table: %d\n", index);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_POWER_SOURCE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_pwr_src_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
dev_warn(smc->adev->dev, "Unsupported SMU power source: %d\n", index);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = arcturus_workload_map[profile];
|
||||
if (!(mapping.valid_mapping))
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables)
|
||||
static int arcturus_tables_init(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct smu_table *tables = smu_table->tables;
|
||||
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
|
@ -352,6 +260,21 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int arcturus_init_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = arcturus_tables_init(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = arcturus_allocate_dpm_context(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return smu_v11_0_init_smc_tables(smu);
|
||||
}
|
||||
|
||||
static int
|
||||
arcturus_get_allowed_feature_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask, uint32_t num)
|
||||
|
@ -374,7 +297,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* socclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.soc_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_SOCCLK,
|
||||
dpm_table);
|
||||
|
@ -392,7 +315,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* gfxclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.gfx_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_GFXCLK,
|
||||
dpm_table);
|
||||
|
@ -410,7 +333,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* memclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.uclk_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_UCLK,
|
||||
dpm_table);
|
||||
|
@ -428,7 +351,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* fclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.fclk_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_FCLK,
|
||||
dpm_table);
|
||||
|
@ -488,7 +411,7 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
|
|||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
smc_dpm_info);
|
||||
|
||||
ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
|
||||
ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
|
||||
(uint8_t **)&smc_dpm_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -533,13 +456,13 @@ static int arcturus_run_btc(struct smu_context *smu)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "RunAfllBtc failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
|
||||
return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
|
||||
}
|
||||
|
||||
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
|
||||
|
@ -621,7 +544,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
|
|||
|
||||
if (!smu_table->metrics_time ||
|
||||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_SMU_METRICS,
|
||||
0,
|
||||
smu_table->metrics_table,
|
||||
|
@ -730,7 +653,9 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
|
|||
if (!value)
|
||||
return -EINVAL;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -742,31 +667,31 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu,
|
|||
* We can use Average_* for dpm disabled case.
|
||||
* But this is available for gfxclk/uclk/socclk/vclk/dclk.
|
||||
*/
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
|
||||
member_type = METRICS_CURR_GFXCLK;
|
||||
else
|
||||
member_type = METRICS_AVERAGE_GFXCLK;
|
||||
break;
|
||||
case PPCLK_UCLK:
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
|
||||
member_type = METRICS_CURR_UCLK;
|
||||
else
|
||||
member_type = METRICS_AVERAGE_UCLK;
|
||||
break;
|
||||
case PPCLK_SOCCLK:
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
|
||||
member_type = METRICS_CURR_SOCCLK;
|
||||
else
|
||||
member_type = METRICS_AVERAGE_SOCCLK;
|
||||
break;
|
||||
case PPCLK_VCLK:
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
|
||||
member_type = METRICS_CURR_VCLK;
|
||||
else
|
||||
member_type = METRICS_AVERAGE_VCLK;
|
||||
break;
|
||||
case PPCLK_DCLK:
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
|
||||
member_type = METRICS_CURR_DCLK;
|
||||
else
|
||||
member_type = METRICS_AVERAGE_DCLK;
|
||||
|
@ -912,10 +837,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
|
|||
uint32_t freq;
|
||||
int ret = 0;
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
|
||||
(feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
|
||||
freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
|
||||
(PPCLK_GFXCLK << 16) | (freq & 0xffff),
|
||||
NULL);
|
||||
|
@ -926,10 +851,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
|
|||
}
|
||||
}
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
|
||||
(feature_mask & FEATURE_DPM_UCLK_MASK)) {
|
||||
freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
|
||||
(PPCLK_UCLK << 16) | (freq & 0xffff),
|
||||
NULL);
|
||||
|
@ -940,10 +865,10 @@ static int arcturus_upload_dpm_level(struct smu_context *smu,
|
|||
}
|
||||
}
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
|
||||
(feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
|
||||
freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
|
||||
(PPCLK_SOCCLK << 16) | (freq & 0xffff),
|
||||
NULL);
|
||||
|
@ -966,7 +891,7 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
|
|||
uint32_t smu_version;
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to get smu version!\n");
|
||||
return ret;
|
||||
|
@ -1283,7 +1208,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
|
|||
if (!buf)
|
||||
return -EINVAL;
|
||||
|
||||
result = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
result = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
|
@ -1300,12 +1225,14 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
|
|||
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
|
||||
* Not all profile modes are supported on arcturus.
|
||||
*/
|
||||
workload_type = smu_workload_get_type(smu, i);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
i);
|
||||
if (workload_type < 0)
|
||||
continue;
|
||||
|
||||
if (smu_version >= 0x360d00) {
|
||||
result = smu_update_table(smu,
|
||||
result = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
workload_type,
|
||||
(void *)(&activity_monitor),
|
||||
|
@ -1368,13 +1295,13 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
|
||||
(smu_version >=0x360d00)) {
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor),
|
||||
|
@ -1409,7 +1336,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
|
|||
break;
|
||||
}
|
||||
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor),
|
||||
|
@ -1424,13 +1351,15 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
|
|||
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
|
||||
* Not all profile modes are supported on arcturus.
|
||||
*/
|
||||
workload_type = smu_workload_get_type(smu, profile_mode);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
profile_mode);
|
||||
if (workload_type < 0) {
|
||||
dev_err(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetWorkloadMask,
|
||||
1 << workload_type,
|
||||
NULL);
|
||||
|
@ -1450,7 +1379,7 @@ static int arcturus_set_performance_level(struct smu_context *smu,
|
|||
uint32_t smu_version;
|
||||
int ret;
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to get smu version!\n");
|
||||
return ret;
|
||||
|
@ -1912,7 +1841,7 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
uint32_t feature_mask[2];
|
||||
unsigned long feature_enabled;
|
||||
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
|
||||
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
|
||||
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
|
||||
((uint64_t)feature_mask[1] << 32));
|
||||
return !!(feature_enabled & SMC_DPM_FEATURE);
|
||||
|
@ -1925,8 +1854,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
|||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
|
||||
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n");
|
||||
return ret;
|
||||
|
@ -1934,8 +1863,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
|||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n");
|
||||
return ret;
|
||||
|
@ -1947,14 +1876,12 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
|
||||
static void arcturus_fill_i2c_req(SwI2cRequest_t *req, bool write,
|
||||
uint8_t address, uint32_t numbytes,
|
||||
uint8_t *data)
|
||||
{
|
||||
int i;
|
||||
|
||||
BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
|
||||
|
||||
req->I2CcontrollerPort = 0;
|
||||
req->I2CSpeed = 2;
|
||||
req->SlaveAddress = address;
|
||||
|
@ -1981,7 +1908,7 @@ static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
|
|||
}
|
||||
}
|
||||
|
||||
static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
|
||||
static int arcturus_i2c_read_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
|
@ -1992,12 +1919,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
|
|||
struct smu_table_context *smu_table = &adev->smu.smu_table;
|
||||
struct smu_table *table = &smu_table->driver_table;
|
||||
|
||||
if (numbytes > MAX_SW_I2C_COMMANDS) {
|
||||
dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
|
||||
numbytes, MAX_SW_I2C_COMMANDS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
|
||||
arcturus_fill_i2c_req(&req, false, address, numbytes, data);
|
||||
|
||||
mutex_lock(&adev->smu.mutex);
|
||||
/* Now read data starting with that address */
|
||||
ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
|
||||
ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
|
||||
true);
|
||||
mutex_unlock(&adev->smu.mutex);
|
||||
|
||||
|
@ -2008,18 +1941,18 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
|
|||
for (i = 0; i < numbytes; i++)
|
||||
data[i] = res->SwI2cCmds[i].Data;
|
||||
|
||||
dev_dbg(adev->dev, "arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
|
||||
dev_dbg(adev->dev, "arcturus_i2c_read_data, address = %x, bytes = %d, data :",
|
||||
(uint16_t)address, numbytes);
|
||||
|
||||
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
|
||||
8, 1, data, numbytes, false);
|
||||
} else
|
||||
dev_err(adev->dev, "arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
|
||||
dev_err(adev->dev, "arcturus_i2c_read_data - error occurred :%x", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
|
||||
static int arcturus_i2c_write_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
|
@ -2028,11 +1961,17 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
|
|||
SwI2cRequest_t req;
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
if (numbytes > MAX_SW_I2C_COMMANDS) {
|
||||
dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
|
||||
numbytes, MAX_SW_I2C_COMMANDS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
|
||||
arcturus_fill_i2c_req(&req, true, address, numbytes, data);
|
||||
|
||||
mutex_lock(&adev->smu.mutex);
|
||||
ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
|
||||
ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
|
||||
mutex_unlock(&adev->smu.mutex);
|
||||
|
||||
if (!ret) {
|
||||
|
@ -2055,7 +1994,7 @@ static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
|
||||
|
@ -2078,18 +2017,18 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
data_chunk[1] = (next_eeprom_addr & 0xff);
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD) {
|
||||
ret = arcturus_i2c_eeprom_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
ret = arcturus_i2c_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
|
||||
memcpy(data_ptr, data_chunk + 2, data_chunk_size);
|
||||
} else {
|
||||
|
||||
memcpy(data_chunk + 2, data_ptr, data_chunk_size);
|
||||
|
||||
ret = arcturus_i2c_eeprom_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
ret = arcturus_i2c_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
|
@ -2106,17 +2045,17 @@ static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
|
|||
data_chunk[1] = (next_eeprom_addr & 0xff);
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD) {
|
||||
ret = arcturus_i2c_eeprom_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
ret = arcturus_i2c_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
|
||||
memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
|
||||
} else {
|
||||
memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
|
||||
|
||||
ret = arcturus_i2c_eeprom_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
ret = arcturus_i2c_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
|
@ -2130,15 +2069,15 @@ fail:
|
|||
return num;
|
||||
}
|
||||
|
||||
static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
|
||||
static u32 arcturus_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
|
||||
static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
|
||||
.master_xfer = arcturus_i2c_eeprom_i2c_xfer,
|
||||
.functionality = arcturus_i2c_eeprom_i2c_func,
|
||||
static const struct i2c_algorithm arcturus_i2c_algo = {
|
||||
.master_xfer = arcturus_i2c_xfer,
|
||||
.functionality = arcturus_i2c_func,
|
||||
};
|
||||
|
||||
static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
|
||||
|
@ -2148,7 +2087,7 @@ static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
|
|||
return control->dev.parent == &adev->pdev->dev;
|
||||
}
|
||||
|
||||
static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
@ -2160,8 +2099,8 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
|
|||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
control->algo = &arcturus_i2c_eeprom_i2c_algo;
|
||||
snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
|
||||
control->algo = &arcturus_i2c_algo;
|
||||
snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
|
||||
|
||||
res = i2c_add_adapter(control);
|
||||
if (res)
|
||||
|
@ -2170,7 +2109,7 @@ static int arcturus_i2c_eeprom_control_init(struct smu_context *smu, struct i2c_
|
|||
return res;
|
||||
}
|
||||
|
||||
static void arcturus_i2c_eeprom_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
if (!arcturus_i2c_adapter_is_added(control))
|
||||
return;
|
||||
|
@ -2184,7 +2123,7 @@ static void arcturus_get_unique_id(struct smu_context *smu)
|
|||
uint32_t top32 = 0, bottom32 = 0, smu_version;
|
||||
uint64_t id;
|
||||
|
||||
if (smu_get_smc_version(smu, NULL, &smu_version)) {
|
||||
if (smu_cmn_get_smc_version(smu, NULL, &smu_version)) {
|
||||
dev_warn(adev->dev, "Failed to get smu version, cannot get unique_id or serial_number\n");
|
||||
return;
|
||||
}
|
||||
|
@ -2196,8 +2135,8 @@ static void arcturus_get_unique_id(struct smu_context *smu)
|
|||
}
|
||||
|
||||
/* Get the SN to turn into a Unique ID */
|
||||
smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
|
||||
smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
|
||||
smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
|
||||
id = ((uint64_t)bottom32 << 32) | top32;
|
||||
adev->unique_id = id;
|
||||
|
@ -2225,7 +2164,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
|
|||
uint32_t smu_version;
|
||||
int ret;
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to get smu version!\n");
|
||||
return ret;
|
||||
|
@ -2237,7 +2176,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
|
||||
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
|
||||
}
|
||||
|
||||
static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
|
||||
|
@ -2245,7 +2184,7 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
|
|||
uint32_t smu_version;
|
||||
int ret;
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to get smu version!\n");
|
||||
return ret;
|
||||
|
@ -2258,12 +2197,12 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
|
|||
}
|
||||
|
||||
if (en)
|
||||
return smu_send_smc_msg_with_param(smu,
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
1,
|
||||
NULL);
|
||||
|
||||
return smu_send_smc_msg_with_param(smu,
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GmiPwrDnControl,
|
||||
0,
|
||||
NULL);
|
||||
|
@ -2315,16 +2254,6 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
|
|||
}
|
||||
|
||||
static const struct pptable_funcs arcturus_ppt_funcs = {
|
||||
/* translate smu index into arcturus specific index */
|
||||
.get_smu_msg_index = arcturus_get_smu_msg_index,
|
||||
.get_smu_clk_index = arcturus_get_smu_clk_index,
|
||||
.get_smu_feature_index = arcturus_get_smu_feature_index,
|
||||
.get_smu_table_index = arcturus_get_smu_table_index,
|
||||
.get_smu_power_index= arcturus_get_pwr_src_index,
|
||||
.get_workload_type = arcturus_get_workload_type,
|
||||
/* internal structurs allocations */
|
||||
.tables_init = arcturus_tables_init,
|
||||
.alloc_dpm_context = arcturus_allocate_dpm_context,
|
||||
/* init dpm */
|
||||
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
|
||||
/* btc */
|
||||
|
@ -2346,13 +2275,13 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
|
|||
.get_power_limit = arcturus_get_power_limit,
|
||||
.is_dpm_running = arcturus_is_dpm_running,
|
||||
.dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
|
||||
.i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
|
||||
.i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
|
||||
.i2c_init = arcturus_i2c_control_init,
|
||||
.i2c_fini = arcturus_i2c_control_fini,
|
||||
.get_unique_id = arcturus_get_unique_id,
|
||||
.init_microcode = smu_v11_0_init_microcode,
|
||||
.load_microcode = smu_v11_0_load_microcode,
|
||||
.fini_microcode = smu_v11_0_fini_microcode,
|
||||
.init_smc_tables = smu_v11_0_init_smc_tables,
|
||||
.init_smc_tables = arcturus_init_smc_tables,
|
||||
.fini_smc_tables = smu_v11_0_fini_smc_tables,
|
||||
.init_power = smu_v11_0_init_power,
|
||||
.fini_power = smu_v11_0_fini_power,
|
||||
|
@ -2361,15 +2290,18 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
|
|||
.setup_pptable = arcturus_setup_pptable,
|
||||
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
|
||||
.check_fw_version = smu_v11_0_check_fw_version,
|
||||
.write_pptable = smu_v11_0_write_pptable,
|
||||
.write_pptable = smu_cmn_write_pptable,
|
||||
.set_driver_table_location = smu_v11_0_set_driver_table_location,
|
||||
.set_tool_table_location = smu_v11_0_set_tool_table_location,
|
||||
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
|
||||
.system_features_control = smu_v11_0_system_features_control,
|
||||
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
|
||||
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
|
||||
.send_smc_msg = smu_cmn_send_smc_msg,
|
||||
.init_display_count = NULL,
|
||||
.set_allowed_mask = smu_v11_0_set_allowed_mask,
|
||||
.get_enabled_mask = smu_v11_0_get_enabled_mask,
|
||||
.get_enabled_mask = smu_cmn_get_enabled_mask,
|
||||
.feature_is_enabled = smu_cmn_feature_is_enabled,
|
||||
.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
|
||||
.notify_display_change = NULL,
|
||||
.set_power_limit = smu_v11_0_set_power_limit,
|
||||
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
|
||||
|
@ -2396,9 +2328,17 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
|
|||
.set_df_cstate = arcturus_set_df_cstate,
|
||||
.allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
|
||||
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
|
||||
};
|
||||
|
||||
void arcturus_set_ppt_funcs(struct smu_context *smu)
|
||||
{
|
||||
smu->ppt_funcs = &arcturus_ppt_funcs;
|
||||
smu->message_map = arcturus_message_map;
|
||||
smu->clock_map = arcturus_clk_map;
|
||||
smu->feature_map = arcturus_feature_mask_map;
|
||||
smu->table_map = arcturus_table_map;
|
||||
smu->pwr_src_map = arcturus_pwr_src_map;
|
||||
smu->workload_map = arcturus_workload_map;
|
||||
}
|
||||
|
|
|
@ -259,7 +259,7 @@ struct smu_table_context
|
|||
void *max_sustainable_clocks;
|
||||
struct smu_bios_boot_up_values boot_values;
|
||||
void *driver_pptable;
|
||||
struct smu_table *tables;
|
||||
struct smu_table tables[SMU_TABLE_COUNT];
|
||||
/*
|
||||
* The driver table is just a staging buffer for
|
||||
* uploading/downloading content from the SMU.
|
||||
|
@ -366,6 +366,17 @@ struct smu_umd_pstate_table {
|
|||
struct pstates_clk_freq dclk_pstate;
|
||||
};
|
||||
|
||||
struct cmn2asic_msg_mapping {
|
||||
int valid_mapping;
|
||||
int map_to;
|
||||
int valid_in_vf;
|
||||
};
|
||||
|
||||
struct cmn2asic_mapping {
|
||||
int valid_mapping;
|
||||
int map_to;
|
||||
};
|
||||
|
||||
#define WORKLOAD_POLICY_MAX 7
|
||||
struct smu_context
|
||||
{
|
||||
|
@ -373,6 +384,12 @@ struct smu_context
|
|||
struct amdgpu_irq_src irq_source;
|
||||
|
||||
const struct pptable_funcs *ppt_funcs;
|
||||
const struct cmn2asic_msg_mapping *message_map;
|
||||
const struct cmn2asic_mapping *clock_map;
|
||||
const struct cmn2asic_mapping *feature_map;
|
||||
const struct cmn2asic_mapping *table_map;
|
||||
const struct cmn2asic_mapping *pwr_src_map;
|
||||
const struct cmn2asic_mapping *workload_map;
|
||||
struct mutex mutex;
|
||||
struct mutex sensor_lock;
|
||||
struct mutex metrics_lock;
|
||||
|
@ -434,13 +451,6 @@ struct smu_context
|
|||
struct i2c_adapter;
|
||||
|
||||
struct pptable_funcs {
|
||||
int (*alloc_dpm_context)(struct smu_context *smu);
|
||||
int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
|
||||
int (*get_smu_clk_index)(struct smu_context *smu, uint32_t index);
|
||||
int (*get_smu_feature_index)(struct smu_context *smu, uint32_t index);
|
||||
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
|
||||
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
|
||||
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
|
||||
int (*run_btc)(struct smu_context *smu);
|
||||
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
|
||||
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
|
||||
|
@ -479,10 +489,9 @@ struct pptable_funcs {
|
|||
int (*notify_smc_display_config)(struct smu_context *smu);
|
||||
int (*set_cpu_power_state)(struct smu_context *smu);
|
||||
bool (*is_dpm_running)(struct smu_context *smu);
|
||||
int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
|
||||
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
|
||||
int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
|
||||
int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
|
||||
int (*set_watermarks_table)(struct smu_context *smu,
|
||||
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
|
||||
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
|
||||
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
|
||||
|
@ -494,8 +503,8 @@ struct pptable_funcs {
|
|||
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
|
||||
int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
|
||||
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
|
||||
int (*i2c_eeprom_init)(struct smu_context *smu, struct i2c_adapter *control);
|
||||
void (*i2c_eeprom_fini)(struct smu_context *smu, struct i2c_adapter *control);
|
||||
int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
|
||||
void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
|
||||
void (*get_unique_id)(struct smu_context *smu);
|
||||
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
|
||||
int (*init_microcode)(struct smu_context *smu);
|
||||
|
@ -519,9 +528,14 @@ struct pptable_funcs {
|
|||
int (*system_features_control)(struct smu_context *smu, bool en);
|
||||
int (*send_smc_msg_with_param)(struct smu_context *smu,
|
||||
enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
|
||||
int (*send_smc_msg)(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t *read_arg);
|
||||
int (*init_display_count)(struct smu_context *smu, uint32_t count);
|
||||
int (*set_allowed_mask)(struct smu_context *smu);
|
||||
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
|
||||
int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
|
||||
int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask);
|
||||
int (*notify_display_change)(struct smu_context *smu);
|
||||
int (*set_power_limit)(struct smu_context *smu, uint32_t n);
|
||||
int (*init_max_sustainable_clocks)(struct smu_context *smu);
|
||||
|
@ -555,6 +569,7 @@ struct pptable_funcs {
|
|||
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
|
||||
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
|
||||
int (*gfx_off_control)(struct smu_context *smu, bool enable);
|
||||
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
|
||||
int (*register_irq_handler)(struct smu_context *smu);
|
||||
int (*set_azalia_d3_pme)(struct smu_context *smu);
|
||||
int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
|
||||
|
@ -571,6 +586,8 @@ struct pptable_funcs {
|
|||
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
|
||||
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
|
||||
void (*log_thermal_throttling_event)(struct smu_context *smu);
|
||||
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
|
||||
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
|
||||
};
|
||||
|
||||
typedef enum {
|
||||
|
@ -603,6 +620,40 @@ typedef enum {
|
|||
METRICS_CURR_FANSPEED,
|
||||
} MetricsMember_t;
|
||||
|
||||
enum smu_cmn2asic_mapping_type {
|
||||
CMN2ASIC_MAPPING_MSG,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
CMN2ASIC_MAPPING_TABLE,
|
||||
CMN2ASIC_MAPPING_PWR,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
};
|
||||
|
||||
#define MSG_MAP(msg, index, valid_in_vf) \
|
||||
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
|
||||
|
||||
#define CLK_MAP(clk, index) \
|
||||
[SMU_##clk] = {1, (index)}
|
||||
|
||||
#define FEA_MAP(fea) \
|
||||
[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
|
||||
|
||||
#define TAB_MAP(tab) \
|
||||
[SMU_TABLE_##tab] = {1, TABLE_##tab}
|
||||
|
||||
#define TAB_MAP_VALID(tab) \
|
||||
[SMU_TABLE_##tab] = {1, TABLE_##tab}
|
||||
|
||||
#define TAB_MAP_INVALID(tab) \
|
||||
[SMU_TABLE_##tab] = {0, TABLE_##tab}
|
||||
|
||||
#define PWR_MAP(tab) \
|
||||
[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
|
||||
|
||||
#define WORKLOAD_MAP(profile, workload) \
|
||||
[profile] = {1, (workload)}
|
||||
|
||||
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
|
||||
int smu_load_microcode(struct smu_context *smu);
|
||||
|
||||
int smu_check_fw_status(struct smu_context *smu);
|
||||
|
@ -678,25 +729,11 @@ bool smu_mode1_reset_is_support(struct smu_context *smu);
|
|||
int smu_mode1_reset(struct smu_context *smu);
|
||||
int smu_mode2_reset(struct smu_context *smu);
|
||||
|
||||
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
|
||||
uint16_t *size, uint8_t *frev, uint8_t *crev,
|
||||
uint8_t **addr);
|
||||
|
||||
extern const struct amd_ip_funcs smu_ip_funcs;
|
||||
|
||||
extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
|
||||
extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
|
||||
|
||||
extern int smu_feature_is_enabled(struct smu_context *smu,
|
||||
enum smu_feature_mask mask);
|
||||
extern int smu_feature_set_enabled(struct smu_context *smu,
|
||||
enum smu_feature_mask mask, bool enable);
|
||||
extern int smu_feature_is_supported(struct smu_context *smu,
|
||||
enum smu_feature_mask mask);
|
||||
|
||||
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
|
||||
void *table_data, bool drv2smu);
|
||||
|
||||
bool is_support_sw_smu(struct amdgpu_device *adev);
|
||||
int smu_reset(struct smu_context *smu);
|
||||
int smu_sys_get_pp_table(struct smu_context *smu, void **table);
|
||||
|
@ -722,7 +759,6 @@ extern int smu_handle_task(struct smu_context *smu,
|
|||
int smu_switch_power_profile(struct smu_context *smu,
|
||||
enum PP_SMC_POWER_PROFILE type,
|
||||
bool en);
|
||||
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version);
|
||||
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
|
||||
uint32_t *min, uint32_t *max);
|
||||
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
|
||||
|
@ -731,9 +767,6 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
|
|||
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
|
||||
int smu_set_display_count(struct smu_context *smu, uint32_t count);
|
||||
int smu_set_ac_dc(struct smu_context *smu);
|
||||
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
|
||||
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
|
||||
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
|
||||
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
|
||||
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
|
||||
int smu_force_clk_levels(struct smu_context *smu,
|
||||
|
@ -755,4 +788,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
|
|||
int smu_get_dpm_clock_table(struct smu_context *smu,
|
||||
struct dpm_clocks *clock_table);
|
||||
|
||||
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
|
||||
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
|
||||
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
|
||||
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2B
|
||||
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
|
||||
|
||||
/* MP Apertures */
|
||||
#define MP0_Public 0x03800000
|
||||
|
@ -52,21 +52,6 @@
|
|||
#define MAX_DPM_LEVELS 16
|
||||
#define MAX_PCIE_CONF 2
|
||||
|
||||
#define CLK_MAP(clk, index) \
|
||||
[SMU_##clk] = {1, (index)}
|
||||
|
||||
#define FEA_MAP(fea) \
|
||||
[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}
|
||||
|
||||
#define TAB_MAP(tab) \
|
||||
[SMU_TABLE_##tab] = {1, TABLE_##tab}
|
||||
|
||||
#define PWR_MAP(tab) \
|
||||
[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}
|
||||
|
||||
#define WORKLOAD_MAP(profile, workload) \
|
||||
[profile] = {1, (workload)}
|
||||
|
||||
#define CTF_OFFSET_EDGE 5
|
||||
#define CTF_OFFSET_HOTSPOT 5
|
||||
#define CTF_OFFSET_MEM 5
|
||||
|
@ -77,17 +62,6 @@ static const struct smu_temperature_range smu11_thermal_policy[] =
|
|||
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
|
||||
};
|
||||
|
||||
struct smu_11_0_msg_mapping {
|
||||
int valid_mapping;
|
||||
int map_to;
|
||||
int valid_in_vf;
|
||||
};
|
||||
|
||||
struct smu_11_0_cmn2aisc_mapping {
|
||||
int valid_mapping;
|
||||
int map_to;
|
||||
};
|
||||
|
||||
struct smu_11_0_max_sustainable_clocks {
|
||||
uint32_t display_clock;
|
||||
uint32_t phy_clock;
|
||||
|
@ -160,6 +134,8 @@ enum smu_v11_0_baco_seq {
|
|||
BACO_SEQ_COUNT,
|
||||
};
|
||||
|
||||
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
|
||||
|
||||
int smu_v11_0_init_microcode(struct smu_context *smu);
|
||||
|
||||
void smu_v11_0_fini_microcode(struct smu_context *smu);
|
||||
|
@ -182,8 +158,6 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
|
|||
|
||||
int smu_v11_0_check_fw_version(struct smu_context *smu);
|
||||
|
||||
int smu_v11_0_write_pptable(struct smu_context *smu);
|
||||
|
||||
int smu_v11_0_set_driver_table_location(struct smu_context *smu);
|
||||
|
||||
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
|
||||
|
@ -193,19 +167,10 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
|
|||
int smu_v11_0_system_features_control(struct smu_context *smu,
|
||||
bool en);
|
||||
|
||||
int
|
||||
smu_v11_0_send_msg_with_param(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t param,
|
||||
uint32_t *read_arg);
|
||||
|
||||
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
|
||||
|
||||
int smu_v11_0_set_allowed_mask(struct smu_context *smu);
|
||||
|
||||
int smu_v11_0_get_enabled_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask, uint32_t num);
|
||||
|
||||
int smu_v11_0_notify_display_change(struct smu_context *smu);
|
||||
|
||||
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
|
||||
|
@ -300,3 +265,4 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
|
|||
uint32_t *max_value);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -31,22 +31,7 @@
|
|||
#define MP1_Public 0x03b00000
|
||||
#define MP1_SRAM 0x03c00004
|
||||
|
||||
|
||||
struct smu_12_0_cmn2aisc_mapping {
|
||||
int valid_mapping;
|
||||
int map_to;
|
||||
};
|
||||
|
||||
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
|
||||
uint16_t msg);
|
||||
|
||||
int smu_v12_0_wait_for_response(struct smu_context *smu);
|
||||
|
||||
int
|
||||
smu_v12_0_send_msg_with_param(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t param,
|
||||
uint32_t *read_arg);
|
||||
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
|
||||
|
||||
int smu_v12_0_check_fw_status(struct smu_context *smu);
|
||||
|
||||
|
@ -64,15 +49,10 @@ uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
|
|||
|
||||
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
|
||||
|
||||
int smu_v12_0_init_smc_tables(struct smu_context *smu);
|
||||
|
||||
int smu_v12_0_fini_smc_tables(struct smu_context *smu);
|
||||
|
||||
int smu_v12_0_set_default_dpm_tables(struct smu_context *smu);
|
||||
|
||||
int smu_v12_0_get_enabled_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask, uint32_t num);
|
||||
|
||||
int smu_v12_0_mode2_reset(struct smu_context *smu);
|
||||
|
||||
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
|
||||
|
@ -81,3 +61,4 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
|
|||
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -21,13 +21,16 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L2
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/i2c.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "atomfirmware.h"
|
||||
#include "amdgpu_atomfirmware.h"
|
||||
#include "amdgpu_atombios.h"
|
||||
#include "soc15_common.h"
|
||||
#include "smu_v11_0.h"
|
||||
#include "smu11_driver_if_navi10.h"
|
||||
|
@ -41,6 +44,7 @@
|
|||
#include "thm/thm_11_0_2_sh_mask.h"
|
||||
|
||||
#include "asic_reg/mp/mp_11_0_sh_mask.h"
|
||||
#include "smu_cmn.h"
|
||||
|
||||
/*
|
||||
* DO NOT use these for err/warn/info/debug messages.
|
||||
|
@ -52,6 +56,8 @@
|
|||
#undef pr_info
|
||||
#undef pr_debug
|
||||
|
||||
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
|
||||
|
||||
#define FEATURE_MASK(feature) (1ULL << feature)
|
||||
#define SMC_DPM_FEATURE ( \
|
||||
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
|
||||
|
@ -63,10 +69,7 @@
|
|||
FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
|
||||
FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
|
||||
|
||||
#define MSG_MAP(msg, index, valid_in_vf) \
|
||||
[SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
|
||||
|
||||
static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
|
||||
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
|
||||
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
|
||||
|
@ -137,7 +140,7 @@ static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
|
|||
MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
|
||||
static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
|
||||
CLK_MAP(GFXCLK, PPCLK_GFXCLK),
|
||||
CLK_MAP(SCLK, PPCLK_GFXCLK),
|
||||
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
|
||||
|
@ -152,7 +155,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
|
|||
CLK_MAP(PHYCLK, PPCLK_PHYCLK),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
|
||||
static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
|
||||
FEA_MAP(DPM_PREFETCHER),
|
||||
FEA_MAP(DPM_GFXCLK),
|
||||
FEA_MAP(DPM_GFX_PACE),
|
||||
|
@ -198,7 +201,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
|
|||
FEA_MAP(APCC_DFLL),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
|
||||
static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
|
||||
TAB_MAP(PPTABLE),
|
||||
TAB_MAP(WATERMARKS),
|
||||
TAB_MAP(AVFS),
|
||||
|
@ -213,12 +216,12 @@ static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
|
|||
TAB_MAP(PACE),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
|
||||
static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
|
||||
PWR_MAP(AC),
|
||||
PWR_MAP(DC),
|
||||
};
|
||||
|
||||
static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
|
||||
static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
|
||||
|
@ -228,100 +231,6 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
|
|||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
|
||||
};
|
||||
|
||||
static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_msg_mapping mapping;
|
||||
|
||||
if (index >= SMU_MSG_MAX_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_message_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
|
||||
return -EACCES;
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_CLK_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_clk_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_FEATURE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_feature_mask_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_TABLE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_table_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_POWER_SOURCE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_pwr_src_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
|
||||
static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
|
||||
{
|
||||
struct smu_11_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = navi10_workload_map[profile];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static bool is_asic_secure(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
@ -467,7 +376,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
|
|||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
smc_dpm_info);
|
||||
|
||||
ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
|
||||
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
|
||||
(uint8_t **)&smc_dpm_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -487,7 +396,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
|
|||
sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
|
||||
break;
|
||||
case 7: /* nv12 */
|
||||
ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
|
||||
ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
|
||||
(uint8_t **)&smc_dpm_table_v4_7);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -543,9 +452,10 @@ static int navi10_setup_pptable(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
|
||||
static int navi10_tables_init(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct smu_table *tables = smu_table->tables;
|
||||
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
|
@ -553,6 +463,8 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
|
|||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
|
||||
|
@ -584,7 +496,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
|
|||
mutex_lock(&smu->metrics_lock);
|
||||
if (!smu_table->metrics_time ||
|
||||
time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_SMU_METRICS,
|
||||
0,
|
||||
smu_table->metrics_table,
|
||||
|
@ -674,9 +586,6 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
|
|||
{
|
||||
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
|
||||
|
||||
if (smu_dpm->dpm_context)
|
||||
return -EINVAL;
|
||||
|
||||
smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
|
||||
GFP_KERNEL);
|
||||
if (!smu_dpm->dpm_context)
|
||||
|
@ -687,6 +596,21 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int navi10_init_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = navi10_tables_init(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = navi10_allocate_dpm_context(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return smu_v11_0_init_smc_tables(smu);
|
||||
}
|
||||
|
||||
static int navi10_set_default_dpm_table(struct smu_context *smu)
|
||||
{
|
||||
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
|
||||
|
@ -696,7 +620,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* socclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.soc_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_SOCCLK,
|
||||
dpm_table);
|
||||
|
@ -714,7 +638,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* gfxclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.gfx_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_GFXCLK,
|
||||
dpm_table);
|
||||
|
@ -732,7 +656,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* uclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.uclk_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_UCLK,
|
||||
dpm_table);
|
||||
|
@ -750,7 +674,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* vclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.vclk_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_VCLK,
|
||||
dpm_table);
|
||||
|
@ -768,7 +692,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* dclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.dclk_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_DCLK,
|
||||
dpm_table);
|
||||
|
@ -786,7 +710,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* dcefclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.dcef_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_DCEFCLK,
|
||||
dpm_table);
|
||||
|
@ -804,7 +728,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* pixelclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.pixel_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_PIXCLK,
|
||||
dpm_table);
|
||||
|
@ -822,7 +746,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* displayclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.display_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_DISPCLK,
|
||||
dpm_table);
|
||||
|
@ -840,7 +764,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
|
|||
|
||||
/* phyclk dpm table setup */
|
||||
dpm_table = &dpm_context->dpm_tables.phy_table;
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
ret = smu_v11_0_set_single_dpm_table(smu,
|
||||
SMU_PHYCLK,
|
||||
dpm_table);
|
||||
|
@ -867,15 +791,15 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
|||
|
||||
if (enable) {
|
||||
/* vcn dpm on is a prerequisite for vcn power gate messages */
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -892,15 +816,15 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
|||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = false;
|
||||
} else {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -917,7 +841,9 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
|
|||
MetricsMember_t member_type;
|
||||
int clk_id = 0;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return clk_id;
|
||||
|
||||
|
@ -955,7 +881,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
|
|||
DpmDescriptor_t *dpm_desc = NULL;
|
||||
uint32_t clk_index = 0;
|
||||
|
||||
clk_index = smu_clk_get_index(smu, clk_type);
|
||||
clk_index = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
dpm_desc = &pptable->DpmDescriptor[clk_index];
|
||||
|
||||
/* 0 - Fine grained DPM, 1 - Discrete DPM */
|
||||
|
@ -1336,11 +1264,11 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
uint32_t max_freq = 0;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1357,9 +1285,9 @@ static int navi10_display_config_changed(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
|
||||
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
|
||||
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
|
||||
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
|
||||
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
|
||||
smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
|
||||
smu->display_config->num_display,
|
||||
NULL);
|
||||
if (ret)
|
||||
|
@ -1412,7 +1340,7 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
uint32_t feature_mask[2];
|
||||
unsigned long feature_enabled;
|
||||
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
|
||||
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
|
||||
feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
|
||||
((uint64_t)feature_mask[1] << 32));
|
||||
return !!(feature_enabled & SMC_DPM_FEATURE);
|
||||
|
@ -1483,11 +1411,13 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
|
|||
|
||||
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_workload_get_type(smu, i);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
i);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
|
||||
result = smu_update_table(smu,
|
||||
result = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
|
||||
(void *)(&activity_monitor), false);
|
||||
if (result) {
|
||||
|
@ -1558,7 +1488,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
|
|||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor), false);
|
||||
if (ret) {
|
||||
|
@ -1602,7 +1532,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
|
|||
break;
|
||||
}
|
||||
|
||||
ret = smu_update_table(smu,
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor), true);
|
||||
if (ret) {
|
||||
|
@ -1612,10 +1542,12 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
|
|||
}
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
1 << workload_type, NULL);
|
||||
|
||||
return ret;
|
||||
|
@ -1631,14 +1563,14 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
|
|||
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
|
||||
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
|
||||
|
||||
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
clock_req.clock_type = amd_pp_dcef_clock;
|
||||
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
|
||||
|
||||
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
|
||||
if (!ret) {
|
||||
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetMinDeepSleepDcefclk,
|
||||
min_clocks.dcef_clock_in_sr/100,
|
||||
NULL);
|
||||
|
@ -1652,7 +1584,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
|
|||
}
|
||||
}
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
|
||||
|
@ -1664,68 +1596,66 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
|
|||
}
|
||||
|
||||
static int navi10_set_watermarks_table(struct smu_context *smu,
|
||||
void *watermarks, struct
|
||||
dm_pp_wm_sets_with_clock_ranges_soc15
|
||||
*clock_ranges)
|
||||
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
|
||||
{
|
||||
int i;
|
||||
Watermarks_t *table = smu->smu_table.watermarks_table;
|
||||
int ret = 0;
|
||||
Watermarks_t *table = watermarks;
|
||||
int i;
|
||||
|
||||
if (!table || !clock_ranges)
|
||||
return -EINVAL;
|
||||
if (clock_ranges) {
|
||||
if (clock_ranges->num_wm_dmif_sets > 4 ||
|
||||
clock_ranges->num_wm_mcif_sets > 4)
|
||||
return -EINVAL;
|
||||
|
||||
if (clock_ranges->num_wm_dmif_sets > 4 ||
|
||||
clock_ranges->num_wm_mcif_sets > 4)
|
||||
return -EINVAL;
|
||||
for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
|
||||
table->WatermarkRow[1][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MinUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MaxUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
|
||||
table->WatermarkRow[1][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MinUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].MaxUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[1][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
|
||||
for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
|
||||
table->WatermarkRow[0][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MinUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MaxUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
smu->watermarks_bitmap |= WATERMARKS_EXIST;
|
||||
}
|
||||
|
||||
for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
|
||||
table->WatermarkRow[0][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MinUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].MaxUclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
|
||||
1000));
|
||||
table->WatermarkRow[0][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
smu->watermarks_bitmap |= WATERMARKS_EXIST;
|
||||
|
||||
/* pass data to smu controller */
|
||||
if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
|
||||
ret = smu_write_watermarks_table(smu);
|
||||
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
|
||||
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
|
||||
ret = smu_cmn_write_watermarks_table(smu);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to update WMTABLE!");
|
||||
return ret;
|
||||
|
@ -1960,7 +1890,7 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
|
|||
((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
|
||||
(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
|
||||
pptable->PcieLaneCount[i] : pcie_width_cap);
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_OverridePcieParameters,
|
||||
smu_pcie_arg,
|
||||
NULL);
|
||||
|
@ -2012,7 +1942,7 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
|
|||
uint32_t value = 0;
|
||||
int ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GetVoltageByDpm,
|
||||
param,
|
||||
&value);
|
||||
|
@ -2046,7 +1976,7 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
|
|||
(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
|
||||
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
|
||||
return ret;
|
||||
|
@ -2180,18 +2110,11 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
|
|||
break;
|
||||
case PP_OD_COMMIT_DPM_TABLE:
|
||||
navi10_dump_od_table(smu, od_table);
|
||||
ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
|
||||
ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
|
||||
return ret;
|
||||
}
|
||||
// no lock needed because smu_od_edit_dpm_table has it
|
||||
ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
|
||||
AMD_PP_TASK_READJUST_POWER_STATE,
|
||||
false);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
case PP_OD_EDIT_VDDC_CURVE:
|
||||
if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
|
||||
|
@ -2260,7 +2183,7 @@ static int navi10_run_btc(struct smu_context *smu)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "RunBtc failed!\n");
|
||||
|
||||
|
@ -2272,9 +2195,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
|
|||
int result = 0;
|
||||
|
||||
if (!enable)
|
||||
result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
|
||||
result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
|
||||
else
|
||||
result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
|
||||
result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -2303,7 +2226,7 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
|
|||
if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
|
||||
return 0;
|
||||
|
||||
ret = smu_get_smc_version(smu, NULL, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -2340,19 +2263,245 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
|
|||
return navi10_dummy_pstate_control(smu, true);
|
||||
}
|
||||
|
||||
static void navi10_fill_i2c_req(SwI2cRequest_t *req, bool write,
|
||||
uint8_t address, uint32_t numbytes,
|
||||
uint8_t *data)
|
||||
{
|
||||
int i;
|
||||
|
||||
BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
|
||||
|
||||
req->I2CcontrollerPort = 0;
|
||||
req->I2CSpeed = 2;
|
||||
req->SlaveAddress = address;
|
||||
req->NumCmds = numbytes;
|
||||
|
||||
for (i = 0; i < numbytes; i++) {
|
||||
SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
|
||||
|
||||
/* First 2 bytes are always write for lower 2b EEPROM address */
|
||||
if (i < 2)
|
||||
cmd->Cmd = 1;
|
||||
else
|
||||
cmd->Cmd = write;
|
||||
|
||||
|
||||
/* Add RESTART for read after address filled */
|
||||
cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
|
||||
|
||||
/* Add STOP in the end */
|
||||
cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
|
||||
|
||||
/* Fill with data regardless if read or write to simplify code */
|
||||
cmd->RegisterAddr = data[i];
|
||||
}
|
||||
}
|
||||
|
||||
static int navi10_i2c_read_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
{
|
||||
uint32_t i, ret = 0;
|
||||
SwI2cRequest_t req;
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
struct smu_table_context *smu_table = &adev->smu.smu_table;
|
||||
struct smu_table *table = &smu_table->driver_table;
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
navi10_fill_i2c_req(&req, false, address, numbytes, data);
|
||||
|
||||
mutex_lock(&adev->smu.mutex);
|
||||
/* Now read data starting with that address */
|
||||
ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
|
||||
true);
|
||||
mutex_unlock(&adev->smu.mutex);
|
||||
|
||||
if (!ret) {
|
||||
SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
|
||||
|
||||
/* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
|
||||
for (i = 0; i < numbytes; i++)
|
||||
data[i] = res->SwI2cCmds[i].Data;
|
||||
|
||||
dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data :",
|
||||
(uint16_t)address, numbytes);
|
||||
|
||||
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
|
||||
8, 1, data, numbytes, false);
|
||||
} else
|
||||
dev_err(adev->dev, "navi10_i2c_read_data - error occurred :%x", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int navi10_i2c_write_data(struct i2c_adapter *control,
|
||||
uint8_t address,
|
||||
uint8_t *data,
|
||||
uint32_t numbytes)
|
||||
{
|
||||
uint32_t ret;
|
||||
SwI2cRequest_t req;
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
navi10_fill_i2c_req(&req, true, address, numbytes, data);
|
||||
|
||||
mutex_lock(&adev->smu.mutex);
|
||||
ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
|
||||
mutex_unlock(&adev->smu.mutex);
|
||||
|
||||
if (!ret) {
|
||||
dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d , data: ",
|
||||
(uint16_t)address, numbytes);
|
||||
|
||||
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
|
||||
8, 1, data, numbytes, false);
|
||||
/*
|
||||
* According to EEPROM spec there is a MAX of 10 ms required for
|
||||
* EEPROM to flush internal RX buffer after STOP was issued at the
|
||||
* end of write transaction. During this time the EEPROM will not be
|
||||
* responsive to any more commands - so wait a bit more.
|
||||
*/
|
||||
msleep(10);
|
||||
|
||||
} else
|
||||
dev_err(adev->dev, "navi10_i2c_write- error occurred :%x", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
|
||||
struct i2c_msg *msgs, int num)
|
||||
{
|
||||
uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
|
||||
uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
/*
|
||||
* SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
|
||||
* once and hence the data needs to be spliced into chunks and sent each
|
||||
* chunk separately
|
||||
*/
|
||||
data_size = msgs[i].len - 2;
|
||||
data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
|
||||
next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
|
||||
data_ptr = msgs[i].buf + 2;
|
||||
|
||||
for (j = 0; j < data_size / data_chunk_size; j++) {
|
||||
/* Insert the EEPROM dest addess, bits 0-15 */
|
||||
data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
|
||||
data_chunk[1] = (next_eeprom_addr & 0xff);
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD) {
|
||||
ret = navi10_i2c_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
|
||||
memcpy(data_ptr, data_chunk + 2, data_chunk_size);
|
||||
} else {
|
||||
|
||||
memcpy(data_chunk + 2, data_ptr, data_chunk_size);
|
||||
|
||||
ret = navi10_i2c_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, MAX_SW_I2C_COMMANDS);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
num = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
next_eeprom_addr += data_chunk_size;
|
||||
data_ptr += data_chunk_size;
|
||||
}
|
||||
|
||||
if (data_size % data_chunk_size) {
|
||||
data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
|
||||
data_chunk[1] = (next_eeprom_addr & 0xff);
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD) {
|
||||
ret = navi10_i2c_read_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
|
||||
memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
|
||||
} else {
|
||||
memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
|
||||
|
||||
ret = navi10_i2c_write_data(i2c_adap,
|
||||
(uint8_t)msgs[i].addr,
|
||||
data_chunk, (data_size % data_chunk_size) + 2);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
num = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
return num;
|
||||
}
|
||||
|
||||
static u32 navi10_i2c_func(struct i2c_adapter *adap)
|
||||
{
|
||||
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
|
||||
}
|
||||
|
||||
|
||||
static const struct i2c_algorithm navi10_i2c_algo = {
|
||||
.master_xfer = navi10_i2c_xfer,
|
||||
.functionality = navi10_i2c_func,
|
||||
};
|
||||
|
||||
static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
|
||||
return control->dev.parent == &adev->pdev->dev;
|
||||
}
|
||||
|
||||
static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
struct amdgpu_device *adev = to_amdgpu_device(control);
|
||||
int res;
|
||||
|
||||
/* smu_i2c_eeprom_init may be called twice in sriov */
|
||||
if (navi10_i2c_adapter_is_added(control))
|
||||
return 0;
|
||||
|
||||
control->owner = THIS_MODULE;
|
||||
control->class = I2C_CLASS_SPD;
|
||||
control->dev.parent = &adev->pdev->dev;
|
||||
control->algo = &navi10_i2c_algo;
|
||||
snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
|
||||
|
||||
res = i2c_add_adapter(control);
|
||||
if (res)
|
||||
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
|
||||
{
|
||||
if (!navi10_i2c_adapter_is_added(control))
|
||||
return;
|
||||
|
||||
i2c_del_adapter(control);
|
||||
}
|
||||
|
||||
|
||||
static const struct pptable_funcs navi10_ppt_funcs = {
|
||||
.tables_init = navi10_tables_init,
|
||||
.alloc_dpm_context = navi10_allocate_dpm_context,
|
||||
.get_smu_msg_index = navi10_get_smu_msg_index,
|
||||
.get_smu_clk_index = navi10_get_smu_clk_index,
|
||||
.get_smu_feature_index = navi10_get_smu_feature_index,
|
||||
.get_smu_table_index = navi10_get_smu_table_index,
|
||||
.get_smu_power_index = navi10_get_pwr_src_index,
|
||||
.get_workload_type = navi10_get_workload_type,
|
||||
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
|
||||
.set_default_dpm_table = navi10_set_default_dpm_table,
|
||||
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
|
||||
.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
|
||||
.i2c_init = navi10_i2c_control_init,
|
||||
.i2c_fini = navi10_i2c_control_fini,
|
||||
.print_clk_levels = navi10_print_clk_levels,
|
||||
.force_clk_levels = navi10_force_clk_levels,
|
||||
.populate_umd_state_clk = navi10_populate_umd_state_clk,
|
||||
|
@ -2376,7 +2525,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
|
|||
.init_microcode = smu_v11_0_init_microcode,
|
||||
.load_microcode = smu_v11_0_load_microcode,
|
||||
.fini_microcode = smu_v11_0_fini_microcode,
|
||||
.init_smc_tables = smu_v11_0_init_smc_tables,
|
||||
.init_smc_tables = navi10_init_smc_tables,
|
||||
.fini_smc_tables = smu_v11_0_fini_smc_tables,
|
||||
.init_power = smu_v11_0_init_power,
|
||||
.fini_power = smu_v11_0_fini_power,
|
||||
|
@ -2384,15 +2533,18 @@ static const struct pptable_funcs navi10_ppt_funcs = {
|
|||
.setup_pptable = navi10_setup_pptable,
|
||||
.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
|
||||
.check_fw_version = smu_v11_0_check_fw_version,
|
||||
.write_pptable = smu_v11_0_write_pptable,
|
||||
.write_pptable = smu_cmn_write_pptable,
|
||||
.set_driver_table_location = smu_v11_0_set_driver_table_location,
|
||||
.set_tool_table_location = smu_v11_0_set_tool_table_location,
|
||||
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
|
||||
.system_features_control = smu_v11_0_system_features_control,
|
||||
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
|
||||
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
|
||||
.send_smc_msg = smu_cmn_send_smc_msg,
|
||||
.init_display_count = smu_v11_0_init_display_count,
|
||||
.set_allowed_mask = smu_v11_0_set_allowed_mask,
|
||||
.get_enabled_mask = smu_v11_0_get_enabled_mask,
|
||||
.get_enabled_mask = smu_cmn_get_enabled_mask,
|
||||
.feature_is_enabled = smu_cmn_feature_is_enabled,
|
||||
.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
|
||||
.notify_display_change = smu_v11_0_notify_display_change,
|
||||
.set_power_limit = smu_v11_0_set_power_limit,
|
||||
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
|
||||
|
@ -2421,9 +2573,17 @@ static const struct pptable_funcs navi10_ppt_funcs = {
|
|||
.run_btc = navi10_run_btc,
|
||||
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
|
||||
.set_power_source = smu_v11_0_set_power_source,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
|
||||
};
|
||||
|
||||
void navi10_set_ppt_funcs(struct smu_context *smu)
|
||||
{
|
||||
smu->ppt_funcs = &navi10_ppt_funcs;
|
||||
smu->message_map = navi10_message_map;
|
||||
smu->clock_map = navi10_clk_map;
|
||||
smu->feature_map = navi10_feature_mask_map;
|
||||
smu->table_map = navi10_table_map;
|
||||
smu->pwr_src_map = navi10_pwr_src_map;
|
||||
smu->workload_map = navi10_workload_map;
|
||||
}
|
||||
|
|
|
@ -21,13 +21,15 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L2
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "smu_v12_0_ppsmc.h"
|
||||
#include "smu12_driver_if.h"
|
||||
#include "smu_v12_0.h"
|
||||
#include "renoir_ppt.h"
|
||||
#include "smu_cmn.h"
|
||||
|
||||
/*
|
||||
* DO NOT use these for err/warn/info/debug messages.
|
||||
|
@ -39,83 +41,71 @@
|
|||
#undef pr_info
|
||||
#undef pr_debug
|
||||
|
||||
#define CLK_MAP(clk, index) \
|
||||
[SMU_##clk] = {1, (index)}
|
||||
|
||||
#define MSG_MAP(msg, index) \
|
||||
[SMU_MSG_##msg] = {1, (index)}
|
||||
|
||||
#define TAB_MAP_VALID(tab) \
|
||||
[SMU_TABLE_##tab] = {1, TABLE_##tab}
|
||||
|
||||
#define TAB_MAP_INVALID(tab) \
|
||||
[SMU_TABLE_##tab] = {0, TABLE_##tab}
|
||||
|
||||
static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage),
|
||||
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion),
|
||||
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion),
|
||||
MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx),
|
||||
MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff),
|
||||
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff),
|
||||
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile),
|
||||
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile),
|
||||
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn),
|
||||
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn),
|
||||
MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma),
|
||||
MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma),
|
||||
MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq),
|
||||
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn),
|
||||
MSG_MAP(Spare1, PPSMC_MSG_spare1),
|
||||
MSG_MAP(Spare2, PPSMC_MSG_spare2),
|
||||
MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch),
|
||||
MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq),
|
||||
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify),
|
||||
MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy),
|
||||
MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps),
|
||||
MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount),
|
||||
MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit),
|
||||
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh),
|
||||
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow),
|
||||
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram),
|
||||
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu),
|
||||
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset),
|
||||
MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid),
|
||||
MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq),
|
||||
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq),
|
||||
MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS),
|
||||
MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq),
|
||||
MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk),
|
||||
MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx),
|
||||
MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq),
|
||||
MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq),
|
||||
MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn),
|
||||
MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode),
|
||||
MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency),
|
||||
MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency),
|
||||
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency),
|
||||
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency),
|
||||
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
|
||||
MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG),
|
||||
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk),
|
||||
MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk),
|
||||
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq),
|
||||
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq),
|
||||
MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn),
|
||||
MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub),
|
||||
MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore),
|
||||
MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState),
|
||||
MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage),
|
||||
MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave),
|
||||
MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown),
|
||||
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg),
|
||||
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg),
|
||||
MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub),
|
||||
MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg),
|
||||
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq),
|
||||
static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
|
||||
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
|
||||
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
|
||||
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
|
||||
MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx, 1),
|
||||
MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff, 1),
|
||||
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 1),
|
||||
MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1),
|
||||
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1),
|
||||
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
|
||||
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
|
||||
MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma, 1),
|
||||
MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma, 1),
|
||||
MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq, 1),
|
||||
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
|
||||
MSG_MAP(Spare1, PPSMC_MSG_spare1, 1),
|
||||
MSG_MAP(Spare2, PPSMC_MSG_spare2, 1),
|
||||
MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch, 1),
|
||||
MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq, 1),
|
||||
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1),
|
||||
MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy, 1),
|
||||
MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1),
|
||||
MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount, 1),
|
||||
MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit, 1),
|
||||
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
|
||||
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
|
||||
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
|
||||
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
|
||||
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
|
||||
MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid, 1),
|
||||
MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq, 1),
|
||||
MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
|
||||
MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS, 1),
|
||||
MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq, 1),
|
||||
MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk, 1),
|
||||
MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx, 1),
|
||||
MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq, 1),
|
||||
MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq, 1),
|
||||
MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
|
||||
MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 1),
|
||||
MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
|
||||
MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1),
|
||||
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency, 1),
|
||||
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency, 1),
|
||||
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset, 1),
|
||||
MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG, 1),
|
||||
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
|
||||
MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
|
||||
MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
|
||||
MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
|
||||
MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
|
||||
MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub, 1),
|
||||
MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore, 1),
|
||||
MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState, 1),
|
||||
MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1),
|
||||
MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave, 1),
|
||||
MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown, 1),
|
||||
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
|
||||
MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
|
||||
MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub, 1),
|
||||
MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg, 1),
|
||||
MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
|
||||
};
|
||||
|
||||
static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
|
||||
static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
|
||||
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
|
||||
CLK_MAP(SCLK, CLOCK_GFXCLK),
|
||||
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
|
||||
|
@ -123,55 +113,20 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
|
|||
CLK_MAP(MCLK, CLOCK_FCLK),
|
||||
};
|
||||
|
||||
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
|
||||
static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
|
||||
TAB_MAP_VALID(WATERMARKS),
|
||||
TAB_MAP_INVALID(CUSTOM_DPM),
|
||||
TAB_MAP_VALID(DPMCLOCKS),
|
||||
TAB_MAP_VALID(SMU_METRICS),
|
||||
};
|
||||
|
||||
static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_12_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_MSG_MAX_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = renoir_message_map[index];
|
||||
if (!(mapping.valid_mapping))
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_12_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_CLK_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = renoir_clk_map[index];
|
||||
if (!(mapping.valid_mapping)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
|
||||
static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index)
|
||||
{
|
||||
struct smu_12_0_cmn2aisc_mapping mapping;
|
||||
|
||||
if (index >= SMU_TABLE_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = renoir_table_map[index];
|
||||
if (!(mapping.valid_mapping))
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
}
|
||||
static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
|
||||
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
|
||||
};
|
||||
|
||||
static int renoir_get_metrics_table(struct smu_context *smu,
|
||||
SmuMetrics_t *metrics_table)
|
||||
|
@ -181,7 +136,7 @@ static int renoir_get_metrics_table(struct smu_context *smu,
|
|||
|
||||
mutex_lock(&smu->metrics_lock);
|
||||
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
|
||||
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
|
||||
ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
|
||||
(void *)smu_table->metrics_table, false);
|
||||
if (ret) {
|
||||
dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
|
||||
|
@ -197,9 +152,10 @@ static int renoir_get_metrics_table(struct smu_context *smu,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
|
||||
static int renoir_init_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct smu_table *tables = smu_table->tables;
|
||||
|
||||
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
|
||||
|
@ -301,7 +257,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
|
|||
uint32_t mclk_mask, soc_mask;
|
||||
uint32_t clock_limit;
|
||||
|
||||
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
|
||||
switch (clk_type) {
|
||||
case SMU_MCLK:
|
||||
case SMU_UCLK:
|
||||
|
@ -340,7 +296,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
|
|||
switch (clk_type) {
|
||||
case SMU_GFXCLK:
|
||||
case SMU_SCLK:
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n");
|
||||
goto failed;
|
||||
|
@ -368,7 +324,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
|
|||
switch (clk_type) {
|
||||
case SMU_GFXCLK:
|
||||
case SMU_SCLK:
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n");
|
||||
goto failed;
|
||||
|
@ -509,15 +465,15 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
|
|||
|
||||
if (enable) {
|
||||
/* vcn dpm on is a prerequisite for vcn power gate messages */
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->vcn_gated = false;
|
||||
} else {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -534,15 +490,15 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
|
|||
int ret = 0;
|
||||
|
||||
if (enable) {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
power_gate->jpeg_gated = false;
|
||||
} else {
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -563,7 +519,9 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return clk_id;
|
||||
|
||||
|
@ -615,7 +573,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
|
|||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
|
||||
if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
|
||||
if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
|
||||
continue;
|
||||
|
||||
clk_type = clk_feature_map[i].clk_type;
|
||||
|
@ -676,35 +634,6 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
|
||||
{
|
||||
|
||||
uint32_t pplib_workload = 0;
|
||||
|
||||
switch (profile) {
|
||||
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
|
||||
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
|
||||
break;
|
||||
case PP_SMC_POWER_PROFILE_CUSTOM:
|
||||
pplib_workload = WORKLOAD_PPLIB_COUNT;
|
||||
break;
|
||||
case PP_SMC_POWER_PROFILE_VIDEO:
|
||||
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
|
||||
break;
|
||||
case PP_SMC_POWER_PROFILE_VR:
|
||||
pplib_workload = WORKLOAD_PPLIB_VR_BIT;
|
||||
break;
|
||||
case PP_SMC_POWER_PROFILE_COMPUTE:
|
||||
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return pplib_workload;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This interface get dpm clock table for dc
|
||||
*/
|
||||
|
@ -760,13 +689,13 @@ static int renoir_force_clk_levels(struct smu_context *smu,
|
|||
ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
|
||||
soft_max_level == 0 ? min_freq :
|
||||
soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
|
||||
soft_min_level == 2 ? max_freq :
|
||||
soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
|
||||
NULL);
|
||||
|
@ -780,10 +709,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
|
|||
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
@ -795,10 +724,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
|
|||
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
@ -820,7 +749,9 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
|
|||
}
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_workload_get_type(smu, profile_mode);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
profile_mode);
|
||||
if (workload_type < 0) {
|
||||
/*
|
||||
* TODO: If some case need switch to powersave/default power mode
|
||||
|
@ -830,7 +761,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
|
||||
1 << workload_type,
|
||||
NULL);
|
||||
if (ret) {
|
||||
|
@ -912,60 +843,59 @@ static int renoir_set_performance_level(struct smu_context *smu,
|
|||
*/
|
||||
static int renoir_set_watermarks_table(
|
||||
struct smu_context *smu,
|
||||
void *watermarks,
|
||||
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
|
||||
{
|
||||
int i;
|
||||
Watermarks_t *table = smu->smu_table.watermarks_table;
|
||||
int ret = 0;
|
||||
Watermarks_t *table = watermarks;
|
||||
int i;
|
||||
|
||||
if (!table || !clock_ranges)
|
||||
return -EINVAL;
|
||||
if (clock_ranges) {
|
||||
if (clock_ranges->num_wm_dmif_sets > 4 ||
|
||||
clock_ranges->num_wm_mcif_sets > 4)
|
||||
return -EINVAL;
|
||||
|
||||
if (clock_ranges->num_wm_dmif_sets > 4 ||
|
||||
clock_ranges->num_wm_mcif_sets > 4)
|
||||
return -EINVAL;
|
||||
/* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
|
||||
for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
|
||||
table->WatermarkRow[WM_DCFCLK][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
/* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
|
||||
for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
|
||||
table->WatermarkRow[WM_DCFCLK][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
|
||||
for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
|
||||
table->WatermarkRow[WM_SOCCLK][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
smu->watermarks_bitmap |= WATERMARKS_EXIST;
|
||||
}
|
||||
|
||||
for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
|
||||
table->WatermarkRow[WM_SOCCLK][i].MinClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
|
||||
cpu_to_le16((uint16_t)
|
||||
(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
|
||||
table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
|
||||
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
|
||||
}
|
||||
|
||||
smu->watermarks_bitmap |= WATERMARKS_EXIST;
|
||||
|
||||
/* pass data to smu controller */
|
||||
if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
|
||||
ret = smu_write_watermarks_table(smu);
|
||||
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
|
||||
!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
|
||||
ret = smu_cmn_write_watermarks_table(smu);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to update WMTABLE!");
|
||||
return ret;
|
||||
|
@ -998,7 +928,9 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
|
|||
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
|
||||
* Not all profile modes are supported on arcturus.
|
||||
*/
|
||||
workload_type = smu_workload_get_type(smu, i);
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
i);
|
||||
if (workload_type < 0)
|
||||
continue;
|
||||
|
||||
|
@ -1064,16 +996,11 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
|
|||
}
|
||||
|
||||
static const struct pptable_funcs renoir_ppt_funcs = {
|
||||
.get_smu_msg_index = renoir_get_smu_msg_index,
|
||||
.get_smu_clk_index = renoir_get_smu_clk_index,
|
||||
.get_smu_table_index = renoir_get_smu_table_index,
|
||||
.tables_init = renoir_tables_init,
|
||||
.set_power_state = NULL,
|
||||
.print_clk_levels = renoir_print_clk_levels,
|
||||
.get_current_power_state = renoir_get_current_power_state,
|
||||
.dpm_set_vcn_enable = renoir_dpm_set_vcn_enable,
|
||||
.dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
|
||||
.get_workload_type = renoir_get_workload_type,
|
||||
.force_clk_levels = renoir_force_clk_levels,
|
||||
.set_power_profile_mode = renoir_set_power_profile_mode,
|
||||
.set_performance_level = renoir_set_performance_level,
|
||||
|
@ -1084,23 +1011,33 @@ static const struct pptable_funcs renoir_ppt_funcs = {
|
|||
.check_fw_status = smu_v12_0_check_fw_status,
|
||||
.check_fw_version = smu_v12_0_check_fw_version,
|
||||
.powergate_sdma = smu_v12_0_powergate_sdma,
|
||||
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
|
||||
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
|
||||
.send_smc_msg = smu_cmn_send_smc_msg,
|
||||
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
|
||||
.gfx_off_control = smu_v12_0_gfx_off_control,
|
||||
.init_smc_tables = smu_v12_0_init_smc_tables,
|
||||
.get_gfx_off_status = smu_v12_0_get_gfxoff_status,
|
||||
.init_smc_tables = renoir_init_smc_tables,
|
||||
.fini_smc_tables = smu_v12_0_fini_smc_tables,
|
||||
.set_default_dpm_table = smu_v12_0_set_default_dpm_tables,
|
||||
.get_enabled_mask = smu_v12_0_get_enabled_mask,
|
||||
.get_enabled_mask = smu_cmn_get_enabled_mask,
|
||||
.feature_is_enabled = smu_cmn_feature_is_enabled,
|
||||
.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
|
||||
.get_dpm_ultimate_freq = renoir_get_dpm_ultimate_freq,
|
||||
.mode2_reset = smu_v12_0_mode2_reset,
|
||||
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
|
||||
.set_driver_table_location = smu_v12_0_set_driver_table_location,
|
||||
.is_dpm_running = renoir_is_dpm_running,
|
||||
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
|
||||
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
|
||||
};
|
||||
|
||||
void renoir_set_ppt_funcs(struct smu_context *smu)
|
||||
{
|
||||
smu->ppt_funcs = &renoir_ppt_funcs;
|
||||
smu->message_map = renoir_message_map;
|
||||
smu->clock_map = renoir_clk_map;
|
||||
smu->table_map = renoir_table_map;
|
||||
smu->workload_map = renoir_workload_map;
|
||||
smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
|
||||
smu->is_apu = true;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,633 @@
|
|||
/*
|
||||
* Copyright 2020 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L4
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_cmn.h"
|
||||
#include "soc15_common.h"
|
||||
|
||||
/*
|
||||
* DO NOT use these for err/warn/info/debug messages.
|
||||
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
|
||||
* They are more MGPU friendly.
|
||||
*/
|
||||
#undef pr_err
|
||||
#undef pr_warn
|
||||
#undef pr_info
|
||||
#undef pr_debug
|
||||
|
||||
/*
|
||||
* Although these are defined in each ASIC's specific header file.
|
||||
* They share the same definitions and values. That makes common
|
||||
* APIs for SMC messages issuing for all ASICs possible.
|
||||
*/
|
||||
#define mmMP1_SMN_C2PMSG_66 0x0282
|
||||
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
|
||||
|
||||
#define mmMP1_SMN_C2PMSG_82 0x0292
|
||||
#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
|
||||
|
||||
#define mmMP1_SMN_C2PMSG_90 0x029a
|
||||
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
|
||||
|
||||
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
|
||||
|
||||
#undef __SMU_DUMMY_MAP
|
||||
#define __SMU_DUMMY_MAP(type) #type
|
||||
static const char* __smu_message_names[] = {
|
||||
SMU_MESSAGE_TYPES
|
||||
};
|
||||
|
||||
static const char *smu_get_message_name(struct smu_context *smu,
|
||||
enum smu_message_type type)
|
||||
{
|
||||
if (type < 0 || type >= SMU_MSG_MAX_COUNT)
|
||||
return "unknown smu message";
|
||||
|
||||
return __smu_message_names[type];
|
||||
}
|
||||
|
||||
static void smu_cmn_send_msg_without_waiting(struct smu_context *smu,
|
||||
uint16_t msg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
|
||||
}
|
||||
|
||||
static void smu_cmn_read_arg(struct smu_context *smu,
|
||||
uint32_t *arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
|
||||
}
|
||||
|
||||
static int smu_cmn_wait_for_response(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
|
||||
|
||||
for (i = 0; i < timeout; i++) {
|
||||
cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
|
||||
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
|
||||
return cur_value == 0x1 ? 0 : -EIO;
|
||||
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* timeout means wrong logic */
|
||||
if (i == timeout)
|
||||
return -ETIME;
|
||||
|
||||
return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
|
||||
}
|
||||
|
||||
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t param,
|
||||
uint32_t *read_arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret = 0, index = 0;
|
||||
|
||||
index = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_MSG,
|
||||
msg);
|
||||
if (index < 0)
|
||||
return index == -EACCES ? 0 : index;
|
||||
|
||||
mutex_lock(&smu->message_lock);
|
||||
ret = smu_cmn_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Msg issuing pre-check failed and "
|
||||
"SMU may be not in the right state!\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
|
||||
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
|
||||
|
||||
smu_cmn_send_msg_without_waiting(smu, (uint16_t)index);
|
||||
|
||||
ret = smu_cmn_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
|
||||
smu_get_message_name(smu, msg), index, param, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (read_arg)
|
||||
smu_cmn_read_arg(smu, read_arg);
|
||||
|
||||
out:
|
||||
mutex_unlock(&smu->message_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_send_smc_msg(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t *read_arg)
|
||||
{
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
msg,
|
||||
0,
|
||||
read_arg);
|
||||
}
|
||||
|
||||
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
|
||||
enum smu_cmn2asic_mapping_type type,
|
||||
uint32_t index)
|
||||
{
|
||||
struct cmn2asic_msg_mapping msg_mapping;
|
||||
struct cmn2asic_mapping mapping;
|
||||
|
||||
switch (type) {
|
||||
case CMN2ASIC_MAPPING_MSG:
|
||||
if (index > SMU_MSG_MAX_COUNT ||
|
||||
!smu->message_map)
|
||||
return -EINVAL;
|
||||
|
||||
msg_mapping = smu->message_map[index];
|
||||
if (!msg_mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
if (amdgpu_sriov_vf(smu->adev) &&
|
||||
!msg_mapping.valid_in_vf)
|
||||
return -EACCES;
|
||||
|
||||
return msg_mapping.map_to;
|
||||
|
||||
case CMN2ASIC_MAPPING_CLK:
|
||||
if (index > SMU_CLK_COUNT ||
|
||||
!smu->clock_map)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = smu->clock_map[index];
|
||||
if (!mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
|
||||
case CMN2ASIC_MAPPING_FEATURE:
|
||||
if (index > SMU_FEATURE_COUNT ||
|
||||
!smu->feature_map)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = smu->feature_map[index];
|
||||
if (!mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
|
||||
case CMN2ASIC_MAPPING_TABLE:
|
||||
if (index > SMU_TABLE_COUNT ||
|
||||
!smu->table_map)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = smu->table_map[index];
|
||||
if (!mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
|
||||
case CMN2ASIC_MAPPING_PWR:
|
||||
if (index > SMU_POWER_SOURCE_COUNT ||
|
||||
!smu->pwr_src_map)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = smu->pwr_src_map[index];
|
||||
if (!mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
|
||||
case CMN2ASIC_MAPPING_WORKLOAD:
|
||||
if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
|
||||
!smu->workload_map)
|
||||
return -EINVAL;
|
||||
|
||||
mapping = smu->workload_map[index];
|
||||
if (!mapping.valid_mapping)
|
||||
return -EINVAL;
|
||||
|
||||
return mapping.map_to;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int smu_cmn_feature_is_supported(struct smu_context *smu,
|
||||
enum smu_feature_mask mask)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
int ret = 0;
|
||||
|
||||
feature_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
mask);
|
||||
if (feature_id < 0)
|
||||
return 0;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
ret = test_bit(feature_id, feature->supported);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_feature_is_enabled(struct smu_context *smu,
|
||||
enum smu_feature_mask mask)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
int ret = 0;
|
||||
|
||||
if (smu->is_apu)
|
||||
return 1;
|
||||
feature_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
mask);
|
||||
if (feature_id < 0)
|
||||
return 0;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
ret = test_bit(feature_id, feature->enabled);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
|
||||
enum smu_clk_type clk_type)
|
||||
{
|
||||
enum smu_feature_mask feature_id = 0;
|
||||
|
||||
switch (clk_type) {
|
||||
case SMU_MCLK:
|
||||
case SMU_UCLK:
|
||||
feature_id = SMU_FEATURE_DPM_UCLK_BIT;
|
||||
break;
|
||||
case SMU_GFXCLK:
|
||||
case SMU_SCLK:
|
||||
feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
|
||||
break;
|
||||
case SMU_SOCCLK:
|
||||
feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
|
||||
break;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!smu_cmn_feature_is_enabled(smu, feature_id))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int smu_cmn_get_enabled_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask,
|
||||
uint32_t num)
|
||||
{
|
||||
uint32_t feature_mask_high = 0, feature_mask_low = 0;
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int ret = 0;
|
||||
|
||||
if (!feature_mask || num < 2)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap_empty(feature->enabled, feature->feature_num)) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
feature_mask[0] = feature_mask_low;
|
||||
feature_mask[1] = feature_mask_high;
|
||||
} else {
|
||||
bitmap_copy((unsigned long *)feature_mask, feature->enabled,
|
||||
feature->feature_num);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
|
||||
uint64_t feature_mask,
|
||||
bool enabled)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int ret = 0;
|
||||
|
||||
if (enabled) {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_EnableSmuFeaturesLow,
|
||||
lower_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_EnableSmuFeaturesHigh,
|
||||
upper_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_DisableSmuFeaturesLow,
|
||||
lower_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_DisableSmuFeaturesHigh,
|
||||
upper_32_bits(feature_mask),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&feature->mutex);
|
||||
if (enabled)
|
||||
bitmap_or(feature->enabled, feature->enabled,
|
||||
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
|
||||
else
|
||||
bitmap_andnot(feature->enabled, feature->enabled,
|
||||
(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
|
||||
mutex_unlock(&feature->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_feature_set_enabled(struct smu_context *smu,
|
||||
enum smu_feature_mask mask,
|
||||
bool enable)
|
||||
{
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int feature_id;
|
||||
|
||||
feature_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
mask);
|
||||
if (feature_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
WARN_ON(feature_id > feature->feature_num);
|
||||
|
||||
return smu_cmn_feature_update_enable_state(smu,
|
||||
1ULL << feature_id,
|
||||
enable);
|
||||
}
|
||||
|
||||
#undef __SMU_DUMMY_MAP
|
||||
#define __SMU_DUMMY_MAP(fea) #fea
|
||||
static const char* __smu_feature_names[] = {
|
||||
SMU_FEATURE_MASKS
|
||||
};
|
||||
|
||||
static const char *smu_get_feature_name(struct smu_context *smu,
|
||||
enum smu_feature_mask feature)
|
||||
{
|
||||
if (feature < 0 || feature >= SMU_FEATURE_COUNT)
|
||||
return "unknown smu feature";
|
||||
return __smu_feature_names[feature];
|
||||
}
|
||||
|
||||
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
|
||||
char *buf)
|
||||
{
|
||||
uint32_t feature_mask[2] = { 0 };
|
||||
int32_t feature_index = 0;
|
||||
uint32_t count = 0;
|
||||
uint32_t sort_feature[SMU_FEATURE_COUNT];
|
||||
uint64_t hw_feature_count = 0;
|
||||
size_t size = 0;
|
||||
int ret = 0, i;
|
||||
|
||||
ret = smu_cmn_get_enabled_mask(smu,
|
||||
feature_mask,
|
||||
2);
|
||||
if (ret)
|
||||
return 0;
|
||||
|
||||
size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
|
||||
feature_mask[1], feature_mask[0]);
|
||||
|
||||
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
|
||||
feature_index = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
i);
|
||||
if (feature_index < 0)
|
||||
continue;
|
||||
sort_feature[feature_index] = i;
|
||||
hw_feature_count++;
|
||||
}
|
||||
|
||||
for (i = 0; i < hw_feature_count; i++) {
|
||||
size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
|
||||
count++,
|
||||
smu_get_feature_name(smu, sort_feature[i]),
|
||||
i,
|
||||
!!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
|
||||
"enabled" : "disabled");
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
|
||||
uint64_t new_mask)
|
||||
{
|
||||
int ret = 0;
|
||||
uint32_t feature_mask[2] = { 0 };
|
||||
uint64_t feature_2_enabled = 0;
|
||||
uint64_t feature_2_disabled = 0;
|
||||
uint64_t feature_enables = 0;
|
||||
|
||||
ret = smu_cmn_get_enabled_mask(smu,
|
||||
feature_mask,
|
||||
2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
feature_enables = ((uint64_t)feature_mask[1] << 32 |
|
||||
(uint64_t)feature_mask[0]);
|
||||
|
||||
feature_2_enabled = ~feature_enables & new_mask;
|
||||
feature_2_disabled = feature_enables & ~new_mask;
|
||||
|
||||
if (feature_2_enabled) {
|
||||
ret = smu_cmn_feature_update_enable_state(smu,
|
||||
feature_2_enabled,
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
if (feature_2_disabled) {
|
||||
ret = smu_cmn_feature_update_enable_state(smu,
|
||||
feature_2_disabled,
|
||||
false);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
|
||||
enum smu_feature_mask mask)
|
||||
{
|
||||
uint64_t features_to_disable = U64_MAX;
|
||||
int skipped_feature_id;
|
||||
|
||||
skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_FEATURE,
|
||||
mask);
|
||||
if (skipped_feature_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
features_to_disable &= ~(1ULL << skipped_feature_id);
|
||||
|
||||
return smu_cmn_feature_update_enable_state(smu,
|
||||
features_to_disable,
|
||||
0);
|
||||
}
|
||||
|
||||
int smu_cmn_get_smc_version(struct smu_context *smu,
|
||||
uint32_t *if_version,
|
||||
uint32_t *smu_version)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!if_version && !smu_version)
|
||||
return -EINVAL;
|
||||
|
||||
if (smu->smc_fw_if_version && smu->smc_fw_version)
|
||||
{
|
||||
if (if_version)
|
||||
*if_version = smu->smc_fw_if_version;
|
||||
|
||||
if (smu_version)
|
||||
*smu_version = smu->smc_fw_version;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (if_version) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smu->smc_fw_if_version = *if_version;
|
||||
}
|
||||
|
||||
if (smu_version) {
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smu->smc_fw_version = *smu_version;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_update_table(struct smu_context *smu,
|
||||
enum smu_table_id table_index,
|
||||
int argument,
|
||||
void *table_data,
|
||||
bool drv2smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
struct smu_table *table = &smu_table->driver_table;
|
||||
int table_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_TABLE,
|
||||
table_index);
|
||||
uint32_t table_size;
|
||||
int ret = 0;
|
||||
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
table_size = smu_table->tables[table_index].size;
|
||||
|
||||
if (drv2smu) {
|
||||
memcpy(table->cpu_addr, table_data, table_size);
|
||||
/*
|
||||
* Flush hdp cache: to guard the content seen by
|
||||
* GPU is consitent with CPU.
|
||||
*/
|
||||
amdgpu_asic_flush_hdp(adev, NULL);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
|
||||
SMU_MSG_TransferTableDram2Smu :
|
||||
SMU_MSG_TransferTableSmu2Dram,
|
||||
table_id | ((argument & 0xFFFF) << 16),
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!drv2smu) {
|
||||
amdgpu_asic_flush_hdp(adev, NULL);
|
||||
memcpy(table_data, table->cpu_addr, table_size);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_cmn_write_watermarks_table(struct smu_context *smu)
|
||||
{
|
||||
void *watermarks_table = smu->smu_table.watermarks_table;
|
||||
|
||||
if (!watermarks_table)
|
||||
return -EINVAL;
|
||||
|
||||
return smu_cmn_update_table(smu,
|
||||
SMU_TABLE_WATERMARKS,
|
||||
0,
|
||||
watermarks_table,
|
||||
true);
|
||||
}
|
||||
|
||||
int smu_cmn_write_pptable(struct smu_context *smu)
|
||||
{
|
||||
void *pptable = smu->smu_table.driver_pptable;
|
||||
|
||||
return smu_cmn_update_table(smu,
|
||||
SMU_TABLE_PPTABLE,
|
||||
0,
|
||||
pptable,
|
||||
true);
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* Copyright 2020 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__

#include "amdgpu_smu.h"

/* Shared swSMU helpers, visible only to the L2/L3/L4 swSMU code layers. */
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)

/* SMC message transport */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg);

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg);

/* common-to-ASIC index translation */
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index);

/* feature query and control */
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask);

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask);

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type);

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num);

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable);

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf);

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask);

int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask);

/* firmware version query */
int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version);

/* driver <-> SMC table transfer */
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu);

int smu_cmn_write_watermarks_table(struct smu_context *smu);

int smu_cmn_write_pptable(struct smu_context *smu);

#endif
#endif
|
|
@ -25,6 +25,8 @@
|
|||
|
||||
#include "amdgpu_smu.h"
|
||||
|
||||
#if defined(SWSMU_CODE_LAYER_L1)
|
||||
|
||||
#define smu_ppt_funcs(intf, ret, smu, args...) \
|
||||
((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? (smu)->ppt_funcs->intf(smu, ##args) : ret) : -EINVAL)
|
||||
|
||||
|
@ -44,22 +46,23 @@
|
|||
#define smu_set_tool_table_location(smu) smu_ppt_funcs(set_tool_table_location, 0, smu)
|
||||
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
|
||||
#define smu_gfx_off_control(smu, enable) smu_ppt_funcs(gfx_off_control, 0, smu, enable)
|
||||
#define smu_get_gfx_off_status(smu) smu_ppt_funcs(get_gfx_off_status, 0, smu)
|
||||
#define smu_set_last_dcef_min_deep_sleep_clk(smu) smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
|
||||
#define smu_system_features_control(smu, en) smu_ppt_funcs(system_features_control, 0, smu, en)
|
||||
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
|
||||
#define smu_set_default_od_settings(smu) smu_ppt_funcs(set_default_od_settings, 0, smu)
|
||||
#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, param, read_arg)
|
||||
#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg_with_param, 0, smu, msg, 0, read_arg)
|
||||
#define smu_alloc_dpm_context(smu) smu_ppt_funcs(alloc_dpm_context, 0, smu)
|
||||
#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
|
||||
#define smu_init_display_count(smu, count) smu_ppt_funcs(init_display_count, 0, smu, count)
|
||||
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
|
||||
#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
|
||||
#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
|
||||
#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
|
||||
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
|
||||
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
|
||||
#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu)
|
||||
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
|
||||
#define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu)
|
||||
#define smu_tables_init(smu, tab) smu_ppt_funcs(tables_init, 0, smu, tab)
|
||||
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)
|
||||
#define smu_disable_thermal_alert(smu) smu_ppt_funcs(disable_thermal_alert, 0, smu)
|
||||
#define smu_smc_read_sensor(smu, sensor, data, size) smu_ppt_funcs(read_sensor, -EINVAL, smu, sensor, data, size)
|
||||
|
@ -68,12 +71,6 @@
|
|||
#define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
|
||||
#define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu)
|
||||
#define smu_set_cpu_power_state(smu) smu_ppt_funcs(set_cpu_power_state, 0, smu)
|
||||
#define smu_msg_get_index(smu, msg) smu_ppt_funcs(get_smu_msg_index, -EINVAL, smu, msg)
|
||||
#define smu_clk_get_index(smu, clk) smu_ppt_funcs(get_smu_clk_index, -EINVAL, smu, clk)
|
||||
#define smu_feature_get_index(smu, fea) smu_ppt_funcs(get_smu_feature_index, -EINVAL, smu, fea)
|
||||
#define smu_table_get_index(smu, tab) smu_ppt_funcs(get_smu_table_index, -EINVAL, smu, tab)
|
||||
#define smu_power_get_index(smu, src) smu_ppt_funcs(get_smu_power_index, -EINVAL, smu, src)
|
||||
#define smu_workload_get_type(smu, type) smu_ppt_funcs(get_workload_type, -EINVAL, smu, type)
|
||||
#define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu)
|
||||
#define smu_get_allowed_feature_mask(smu, feature_mask, num) smu_ppt_funcs(get_allowed_feature_mask, 0, smu, feature_mask, num)
|
||||
#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) smu_ppt_funcs(store_cc6_data, 0, smu, st, cc6_dis, pst_dis, pst_sw_dis)
|
||||
|
@ -82,7 +79,7 @@
|
|||
#define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
|
||||
#define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
|
||||
#define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
|
||||
#define smu_set_watermarks_table(smu, tab, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, tab, clock_ranges)
|
||||
#define smu_set_watermarks_table(smu, clock_ranges) smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
|
||||
#define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
|
||||
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
|
||||
#define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max)
|
||||
|
@ -91,10 +88,13 @@
|
|||
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
|
||||
#define smu_disable_umc_cdr_12gbps_workaround(smu) smu_ppt_funcs(disable_umc_cdr_12gbps_workaround, 0, smu)
|
||||
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
|
||||
#define smu_i2c_eeprom_init(smu, control) smu_ppt_funcs(i2c_eeprom_init, 0, smu, control)
|
||||
#define smu_i2c_eeprom_fini(smu, control) smu_ppt_funcs(i2c_eeprom_fini, 0, smu, control)
|
||||
#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control)
|
||||
#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
|
||||
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
|
||||
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
|
||||
#define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu)
|
||||
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
|
||||
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -26,16 +26,18 @@
|
|||
#include <linux/reboot.h>
|
||||
|
||||
#define SMU_11_0_PARTIAL_PPTABLE
|
||||
#define SWSMU_CODE_LAYER_L3
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "atomfirmware.h"
|
||||
#include "amdgpu_atomfirmware.h"
|
||||
#include "amdgpu_atombios.h"
|
||||
#include "smu_v11_0.h"
|
||||
#include "soc15_common.h"
|
||||
#include "atom.h"
|
||||
#include "amdgpu_ras.h"
|
||||
#include "smu_cmn.h"
|
||||
|
||||
#include "asic_reg/thm/thm_11_0_2_offset.h"
|
||||
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
|
||||
|
@ -65,89 +67,6 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
|
|||
|
||||
#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
|
||||
|
||||
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
|
||||
uint16_t msg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smu_v11_0_wait_for_response(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
|
||||
|
||||
for (i = 0; i < timeout; i++) {
|
||||
cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
|
||||
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
|
||||
return cur_value == 0x1 ? 0 : -EIO;
|
||||
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* timeout means wrong logic */
|
||||
if (i == timeout)
|
||||
return -ETIME;
|
||||
|
||||
return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
|
||||
}
|
||||
|
||||
int
|
||||
smu_v11_0_send_msg_with_param(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t param,
|
||||
uint32_t *read_arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret = 0, index = 0;
|
||||
|
||||
index = smu_msg_get_index(smu, msg);
|
||||
if (index < 0)
|
||||
return index == -EACCES ? 0 : index;
|
||||
|
||||
mutex_lock(&smu->message_lock);
|
||||
ret = smu_v11_0_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Msg issuing pre-check failed and "
|
||||
"SMU may be not in the right state!\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
|
||||
|
||||
WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
|
||||
|
||||
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
|
||||
|
||||
ret = smu_v11_0_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
|
||||
smu_get_message_name(smu, msg), index, param, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (read_arg) {
|
||||
ret = smu_v11_0_read_arg(smu, read_arg);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
|
||||
smu_get_message_name(smu, msg), index, param, ret);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&smu->message_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v11_0_init_microcode(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
@ -285,7 +204,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
|
|||
uint8_t smu_minor, smu_debug;
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_get_smc_version(smu, &if_version, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -416,7 +335,7 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
|
|||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
powerplayinfo);
|
||||
|
||||
ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
|
||||
ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
|
||||
(uint8_t **)&table);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -431,70 +350,24 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int smu_v11_0_init_dpm_context(struct smu_context *smu)
|
||||
{
|
||||
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
|
||||
|
||||
if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
|
||||
return -EINVAL;
|
||||
|
||||
return smu_alloc_dpm_context(smu);
|
||||
}
|
||||
|
||||
static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
|
||||
{
|
||||
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
|
||||
|
||||
if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
kfree(smu_dpm->dpm_context);
|
||||
kfree(smu_dpm->golden_dpm_context);
|
||||
kfree(smu_dpm->dpm_current_power_state);
|
||||
kfree(smu_dpm->dpm_request_power_state);
|
||||
smu_dpm->dpm_context = NULL;
|
||||
smu_dpm->golden_dpm_context = NULL;
|
||||
smu_dpm->dpm_context_size = 0;
|
||||
smu_dpm->dpm_current_power_state = NULL;
|
||||
smu_dpm->dpm_request_power_state = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int smu_v11_0_init_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct smu_table *tables = NULL;
|
||||
struct smu_table *tables = smu_table->tables;
|
||||
int ret = 0;
|
||||
|
||||
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
|
||||
GFP_KERNEL);
|
||||
if (!tables) {
|
||||
ret = -ENOMEM;
|
||||
goto err0_out;
|
||||
}
|
||||
smu_table->tables = tables;
|
||||
|
||||
ret = smu_tables_init(smu, tables);
|
||||
if (ret)
|
||||
goto err1_out;
|
||||
|
||||
ret = smu_v11_0_init_dpm_context(smu);
|
||||
if (ret)
|
||||
goto err1_out;
|
||||
|
||||
smu_table->driver_pptable =
|
||||
kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
|
||||
if (!smu_table->driver_pptable) {
|
||||
ret = -ENOMEM;
|
||||
goto err2_out;
|
||||
goto err0_out;
|
||||
}
|
||||
|
||||
smu_table->max_sustainable_clocks =
|
||||
kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
|
||||
if (!smu_table->max_sustainable_clocks) {
|
||||
ret = -ENOMEM;
|
||||
goto err3_out;
|
||||
goto err1_out;
|
||||
}
|
||||
|
||||
/* Arcturus does not support OVERDRIVE */
|
||||
|
@ -503,29 +376,25 @@ int smu_v11_0_init_smc_tables(struct smu_context *smu)
|
|||
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
|
||||
if (!smu_table->overdrive_table) {
|
||||
ret = -ENOMEM;
|
||||
goto err4_out;
|
||||
goto err2_out;
|
||||
}
|
||||
|
||||
smu_table->boot_overdrive_table =
|
||||
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
|
||||
if (!smu_table->boot_overdrive_table) {
|
||||
ret = -ENOMEM;
|
||||
goto err5_out;
|
||||
goto err3_out;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err5_out:
|
||||
kfree(smu_table->overdrive_table);
|
||||
err4_out:
|
||||
kfree(smu_table->max_sustainable_clocks);
|
||||
err3_out:
|
||||
kfree(smu_table->driver_pptable);
|
||||
kfree(smu_table->overdrive_table);
|
||||
err2_out:
|
||||
smu_v11_0_fini_dpm_context(smu);
|
||||
kfree(smu_table->max_sustainable_clocks);
|
||||
err1_out:
|
||||
kfree(tables);
|
||||
kfree(smu_table->driver_pptable);
|
||||
err0_out:
|
||||
return ret;
|
||||
}
|
||||
|
@ -533,10 +402,7 @@ err0_out:
|
|||
int smu_v11_0_fini_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_table->tables)
|
||||
return -EINVAL;
|
||||
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
|
||||
|
||||
kfree(smu_table->boot_overdrive_table);
|
||||
kfree(smu_table->overdrive_table);
|
||||
|
@ -549,17 +415,22 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
|
|||
kfree(smu_table->hardcode_pptable);
|
||||
smu_table->hardcode_pptable = NULL;
|
||||
|
||||
kfree(smu_table->tables);
|
||||
kfree(smu_table->metrics_table);
|
||||
kfree(smu_table->watermarks_table);
|
||||
smu_table->tables = NULL;
|
||||
smu_table->metrics_table = NULL;
|
||||
smu_table->watermarks_table = NULL;
|
||||
smu_table->metrics_time = 0;
|
||||
|
||||
ret = smu_v11_0_fini_dpm_context(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
kfree(smu_dpm->dpm_context);
|
||||
kfree(smu_dpm->golden_dpm_context);
|
||||
kfree(smu_dpm->dpm_current_power_state);
|
||||
kfree(smu_dpm->dpm_request_power_state);
|
||||
smu_dpm->dpm_context = NULL;
|
||||
smu_dpm->golden_dpm_context = NULL;
|
||||
smu_dpm->dpm_context_size = 0;
|
||||
smu_dpm->dpm_current_power_state = NULL;
|
||||
smu_dpm->dpm_request_power_state = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -631,7 +502,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
|
|||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
firmwareinfo);
|
||||
|
||||
ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
|
||||
ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
|
||||
(uint8_t **)&header);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -727,13 +598,13 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
|
|||
address_high = (uint32_t)upper_32_bits(address);
|
||||
address_low = (uint32_t)lower_32_bits(address);
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetSystemVirtualDramAddrHigh,
|
||||
address_high,
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetSystemVirtualDramAddrLow,
|
||||
address_low,
|
||||
NULL);
|
||||
|
@ -744,15 +615,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
|
|||
address_high = (uint32_t)upper_32_bits(address);
|
||||
address_low = (uint32_t)lower_32_bits(address);
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
|
||||
address_high, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
|
||||
address_low, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
|
||||
(uint32_t)memory_pool->size, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -760,22 +631,11 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int smu_v11_0_write_pptable(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *table_context = &smu->smu_table;
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
|
||||
table_context->driver_pptable, true);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
|
||||
|
@ -789,12 +649,12 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
|
||||
if (driver_table->mc_address) {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetDriverDramAddrHigh,
|
||||
upper_32_bits(driver_table->mc_address),
|
||||
NULL);
|
||||
if (!ret)
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetDriverDramAddrLow,
|
||||
lower_32_bits(driver_table->mc_address),
|
||||
NULL);
|
||||
|
@ -809,12 +669,12 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
|
|||
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
|
||||
|
||||
if (tool_table->mc_address) {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetToolsDramAddrHigh,
|
||||
upper_32_bits(tool_table->mc_address),
|
||||
NULL);
|
||||
if (!ret)
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetToolsDramAddrLow,
|
||||
lower_32_bits(tool_table->mc_address),
|
||||
NULL);
|
||||
|
@ -835,7 +695,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
|
|||
if (!smu->pm_enabled)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -852,12 +712,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
|
|||
|
||||
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
|
||||
feature_mask[1], NULL);
|
||||
if (ret)
|
||||
goto failed;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
|
||||
feature_mask[0], NULL);
|
||||
if (ret)
|
||||
goto failed;
|
||||
|
@ -867,35 +727,6 @@ failed:
|
|||
return ret;
|
||||
}
|
||||
|
||||
int smu_v11_0_get_enabled_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask, uint32_t num)
|
||||
{
|
||||
uint32_t feature_mask_high = 0, feature_mask_low = 0;
|
||||
struct smu_feature *feature = &smu->smu_feature;
|
||||
int ret = 0;
|
||||
|
||||
if (!feature_mask || num < 2)
|
||||
return -EINVAL;
|
||||
|
||||
if (bitmap_empty(feature->enabled, feature->feature_num)) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
feature_mask[0] = feature_mask_low;
|
||||
feature_mask[1] = feature_mask_high;
|
||||
} else {
|
||||
bitmap_copy((unsigned long *)feature_mask, feature->enabled,
|
||||
feature->feature_num);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v11_0_system_features_control(struct smu_context *smu,
|
||||
bool en)
|
||||
{
|
||||
|
@ -903,7 +734,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
|
|||
uint32_t feature_mask[2];
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
|
||||
ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
|
||||
SMU_MSG_DisableAllSmuFeatures), NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -912,7 +743,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
|
|||
bitmap_zero(feature->supported, feature->feature_num);
|
||||
|
||||
if (en) {
|
||||
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
|
||||
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -932,9 +763,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
|
|||
if (!smu->pm_enabled)
|
||||
return ret;
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
|
||||
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -946,15 +777,17 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
|
|||
int ret = 0;
|
||||
int clk_id;
|
||||
|
||||
if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
|
||||
(smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
|
||||
if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
|
||||
(smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
|
||||
return 0;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clock_select);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clock_select);
|
||||
if (clk_id < 0)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
|
||||
clk_id << 16, clock);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
|
||||
|
@ -965,7 +798,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
|
|||
return 0;
|
||||
|
||||
/* if DC limit is zero, return AC limit */
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
|
||||
clk_id << 16, clock);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
|
||||
|
@ -988,7 +821,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
|
|||
max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
|
||||
max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
ret = smu_v11_0_get_max_sustainable_clock(smu,
|
||||
&(max_sustainable_clocks->uclock),
|
||||
SMU_UCLK);
|
||||
|
@ -999,7 +832,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
|
|||
}
|
||||
}
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
|
||||
ret = smu_v11_0_get_max_sustainable_clock(smu,
|
||||
&(max_sustainable_clocks->soc_clock),
|
||||
SMU_SOCCLK);
|
||||
|
@ -1010,7 +843,7 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
|
|||
}
|
||||
}
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
|
||||
ret = smu_v11_0_get_max_sustainable_clock(smu,
|
||||
&(max_sustainable_clocks->dcef_clock),
|
||||
SMU_DCEFCLK);
|
||||
|
@ -1058,17 +891,18 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
|
|||
int power_src;
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
|
||||
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
|
||||
return -EINVAL;
|
||||
|
||||
power_src = smu_power_get_index(smu,
|
||||
power_src = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_PWR,
|
||||
smu->adev->pm.ac_power ?
|
||||
SMU_POWER_SOURCE_AC :
|
||||
SMU_POWER_SOURCE_DC);
|
||||
if (power_src < 0)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GetPptLimit,
|
||||
power_src << 16,
|
||||
power_limit);
|
||||
|
@ -1082,12 +916,12 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
|
||||
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
|
||||
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
|
||||
return ret;
|
||||
|
@ -1145,8 +979,8 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
|
|||
enum smu_clk_type clk_select = 0;
|
||||
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
|
||||
|
||||
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
|
||||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
|
||||
smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
|
||||
switch (clk_type) {
|
||||
case amd_pp_dcef_clock:
|
||||
clk_select = SMU_DCEFCLK;
|
||||
|
@ -1198,9 +1032,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
|
|||
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
|
||||
return 0;
|
||||
if (enable)
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
|
||||
else
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -1212,7 +1046,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
|
|||
uint32_t
|
||||
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
|
||||
{
|
||||
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
|
||||
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
|
||||
return AMD_FAN_CTRL_MANUAL;
|
||||
else
|
||||
return AMD_FAN_CTRL_AUTO;
|
||||
|
@ -1223,10 +1057,10 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
|
||||
if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
|
||||
return 0;
|
||||
|
||||
ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
|
||||
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
|
||||
__func__, (auto_fan_control ? "Start" : "Stop"));
|
||||
|
@ -1336,7 +1170,7 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
|
|||
uint32_t pstate)
|
||||
{
|
||||
int ret = 0;
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetXgmiMode,
|
||||
pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
|
||||
NULL);
|
||||
|
@ -1410,7 +1244,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
|
|||
|
||||
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
|
||||
{
|
||||
return smu_send_smc_msg(smu,
|
||||
return smu_cmn_send_smc_msg(smu,
|
||||
SMU_MSG_ReenableAcDcInterrupt,
|
||||
NULL);
|
||||
}
|
||||
|
@ -1568,14 +1402,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
|
||||
{
|
||||
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
|
||||
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
|
||||
}
|
||||
|
||||
bool smu_v11_0_baco_is_support(struct smu_context *smu)
|
||||
|
@ -1591,8 +1425,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
|
|||
return false;
|
||||
|
||||
/* Arcturus does not support this bit mask */
|
||||
if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
|
||||
!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
|
||||
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
|
||||
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -1629,21 +1463,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
|
|||
data |= 0x80000000;
|
||||
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
|
||||
} else {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
|
||||
}
|
||||
} else {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (ras && ras->supported) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* clear vbios scratch 6 and 7 for coming asic reinit */
|
||||
WREG32(adev->bios_scratch_reg_offset + 6, 0);
|
||||
WREG32(adev->bios_scratch_reg_offset + 7, 0);
|
||||
|
@ -1693,7 +1521,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu)
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
|
||||
if (!ret)
|
||||
msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
|
||||
|
||||
|
@ -1707,7 +1535,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
|
|||
uint32_t param = 0;
|
||||
uint32_t clock_limit;
|
||||
|
||||
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
|
||||
switch (clk_type) {
|
||||
case SMU_MCLK:
|
||||
case SMU_UCLK:
|
||||
|
@ -1734,7 +1562,9 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
|
|||
return 0;
|
||||
}
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0) {
|
||||
ret = -EINVAL;
|
||||
goto failed;
|
||||
|
@ -1742,13 +1572,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
|
|||
param = (clk_id & 0xffff) << 16;
|
||||
|
||||
if (max) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
|
||||
if (ret)
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if (min) {
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
|
||||
if (ret)
|
||||
goto failed;
|
||||
}
|
||||
|
@ -1766,7 +1596,12 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
|
|||
int ret = 0, clk_id = 0;
|
||||
uint32_t param;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
|
||||
return 0;
|
||||
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return clk_id;
|
||||
|
||||
|
@ -1775,7 +1610,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
|
|||
|
||||
if (max > 0) {
|
||||
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
|
||||
param, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -1783,7 +1618,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
|
|||
|
||||
if (min > 0) {
|
||||
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
|
||||
param, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -1807,16 +1642,18 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
|
|||
if (min <= 0 && max <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (!smu_clk_dpm_is_enabled(smu, clk_type))
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
|
||||
return 0;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return clk_id;
|
||||
|
||||
if (max > 0) {
|
||||
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
|
||||
param, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1824,7 +1661,7 @@ int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
|
|||
|
||||
if (min > 0) {
|
||||
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
|
||||
param, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1939,11 +1776,13 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
|
|||
{
|
||||
int pwr_source;
|
||||
|
||||
pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
|
||||
pwr_source = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_PWR,
|
||||
(uint32_t)power_src);
|
||||
if (pwr_source < 0)
|
||||
return -EINVAL;
|
||||
|
||||
return smu_send_smc_msg_with_param(smu,
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_NotifyPowerSource,
|
||||
pwr_source,
|
||||
NULL);
|
||||
|
@ -1960,16 +1799,18 @@ int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
|
|||
if (!value)
|
||||
return -EINVAL;
|
||||
|
||||
if (!smu_clk_dpm_is_enabled(smu, clk_type))
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
|
||||
return 0;
|
||||
|
||||
clk_id = smu_clk_get_index(smu, clk_type);
|
||||
clk_id = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_id < 0)
|
||||
return clk_id;
|
||||
|
||||
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_GetDpmFreqByIndex,
|
||||
param,
|
||||
value);
|
||||
|
|
|
@ -20,15 +20,17 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#define SWSMU_CODE_LAYER_L3
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_smu.h"
|
||||
#include "smu_internal.h"
|
||||
#include "atomfirmware.h"
|
||||
#include "amdgpu_atomfirmware.h"
|
||||
#include "smu_v12_0.h"
|
||||
#include "soc15_common.h"
|
||||
#include "atom.h"
|
||||
#include "smu_cmn.h"
|
||||
|
||||
#include "asic_reg/mp/mp_12_0_0_offset.h"
|
||||
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
|
||||
|
@ -52,86 +54,6 @@
|
|||
|
||||
#define smnMP1_FIRMWARE_FLAGS 0x3010024
|
||||
|
||||
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
|
||||
uint16_t msg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
||||
*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int smu_v12_0_wait_for_response(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
uint32_t cur_value, i;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
|
||||
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
|
||||
return cur_value == 0x1 ? 0 : -EIO;
|
||||
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* timeout means wrong logic */
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
int
|
||||
smu_v12_0_send_msg_with_param(struct smu_context *smu,
|
||||
enum smu_message_type msg,
|
||||
uint32_t param,
|
||||
uint32_t *read_arg)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret = 0, index = 0;
|
||||
|
||||
index = smu_msg_get_index(smu, msg);
|
||||
if (index < 0)
|
||||
return index;
|
||||
|
||||
mutex_lock(&smu->message_lock);
|
||||
ret = smu_v12_0_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Msg issuing pre-check failed and "
|
||||
"SMU may be not in the right state!\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
|
||||
|
||||
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
|
||||
|
||||
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
|
||||
|
||||
ret = smu_v12_0_wait_for_response(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x param 0x%x\n",
|
||||
index, ret, param);
|
||||
goto out;
|
||||
}
|
||||
if (read_arg) {
|
||||
ret = smu_v12_0_read_arg(smu, read_arg);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
|
||||
index, ret, param);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&smu->message_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int smu_v12_0_check_fw_status(struct smu_context *smu)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
|
@ -154,7 +76,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
|
|||
uint8_t smu_minor, smu_debug;
|
||||
int ret = 0;
|
||||
|
||||
ret = smu_get_smc_version(smu, &if_version, &smu_version);
|
||||
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -187,9 +109,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
|
|||
return 0;
|
||||
|
||||
if (gate)
|
||||
return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
|
||||
return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
|
||||
else
|
||||
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
|
||||
return smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
|
||||
}
|
||||
|
||||
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
|
||||
|
@ -197,7 +119,7 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
|
|||
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
|
||||
return 0;
|
||||
|
||||
return smu_v12_0_send_msg_with_param(smu,
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetGfxCGPG,
|
||||
enable ? 1 : 0,
|
||||
NULL);
|
||||
|
@ -233,10 +155,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
|
|||
int ret = 0, timeout = 500;
|
||||
|
||||
if (enable) {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
|
||||
|
||||
} else {
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
|
||||
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
|
||||
|
||||
/* confirm gfx is back to "on" state, timeout is 0.5 second */
|
||||
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
|
||||
|
@ -252,36 +174,18 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int smu_v12_0_init_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
struct smu_table *tables = NULL;
|
||||
|
||||
if (smu_table->tables)
|
||||
return -EINVAL;
|
||||
|
||||
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
|
||||
GFP_KERNEL);
|
||||
if (!tables)
|
||||
return -ENOMEM;
|
||||
|
||||
smu_table->tables = tables;
|
||||
|
||||
return smu_tables_init(smu, tables);
|
||||
}
|
||||
|
||||
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
|
||||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
|
||||
if (!smu_table->tables)
|
||||
return -EINVAL;
|
||||
|
||||
kfree(smu_table->clocks_table);
|
||||
kfree(smu_table->tables);
|
||||
|
||||
smu_table->clocks_table = NULL;
|
||||
smu_table->tables = NULL;
|
||||
|
||||
kfree(smu_table->metrics_table);
|
||||
smu_table->metrics_table = NULL;
|
||||
|
||||
kfree(smu_table->watermarks_table);
|
||||
smu_table->watermarks_table = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -290,34 +194,11 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
|
|||
{
|
||||
struct smu_table_context *smu_table = &smu->smu_table;
|
||||
|
||||
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
|
||||
}
|
||||
|
||||
int smu_v12_0_get_enabled_mask(struct smu_context *smu,
|
||||
uint32_t *feature_mask, uint32_t num)
|
||||
{
|
||||
uint32_t feature_mask_high = 0, feature_mask_low = 0;
|
||||
int ret = 0;
|
||||
|
||||
if (!feature_mask || num < 2)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
feature_mask[0] = feature_mask_low;
|
||||
feature_mask[1] = feature_mask_high;
|
||||
|
||||
return ret;
|
||||
return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
|
||||
}
|
||||
|
||||
int smu_v12_0_mode2_reset(struct smu_context *smu){
|
||||
return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
|
||||
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
|
||||
}
|
||||
|
||||
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
|
||||
|
@ -325,42 +206,45 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
|
||||
return 0;
|
||||
|
||||
switch (clk_type) {
|
||||
case SMU_GFXCLK:
|
||||
case SMU_SCLK:
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case SMU_FCLK:
|
||||
case SMU_MCLK:
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case SMU_SOCCLK:
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
case SMU_VCLK:
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
@ -377,12 +261,12 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
|
|||
int ret = 0;
|
||||
|
||||
if (driver_table->mc_address) {
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetDriverDramAddrHigh,
|
||||
upper_32_bits(driver_table->mc_address),
|
||||
NULL);
|
||||
if (!ret)
|
||||
ret = smu_send_smc_msg_with_param(smu,
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetDriverDramAddrLow,
|
||||
lower_32_bits(driver_table->mc_address),
|
||||
NULL);
|
||||
|
|
|
@ -522,11 +522,9 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
|
|||
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
|
||||
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
|
||||
|
||||
if (adev->psp.ras.ras) {
|
||||
ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
|
||||
if (ret)
|
||||
goto err4;
|
||||
}
|
||||
ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
|
||||
if (ret)
|
||||
goto err4;
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -562,8 +560,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
|
|||
(struct vega20_smumgr *)(hwmgr->smu_backend);
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
|
||||
if (adev->psp.ras.ras)
|
||||
smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
|
||||
smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
|
||||
|
||||
if (priv) {
|
||||
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
|
||||
|
|
|
@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
|
|||
|
||||
/* sclk is bigger than max sclk in the dependence table */
|
||||
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
|
||||
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
|
||||
(dep_table->entries[i - 1].vddc -
|
||||
(uint16_t)VDDC_VDDCI_DELTA));
|
||||
|
||||
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
|
||||
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
|
||||
|
@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
|
|||
else if (dep_table->entries[i - 1].vddci)
|
||||
*voltage |= (dep_table->entries[i - 1].vddci *
|
||||
VOLTAGE_SCALE) << VDDC_SHIFT;
|
||||
else
|
||||
else {
|
||||
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
|
||||
(dep_table->entries[i - 1].vddc -
|
||||
(uint16_t)VDDC_VDDCI_DELTA));
|
||||
|
||||
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
|
||||
}
|
||||
|
||||
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
|
||||
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
|
||||
|
|
Loading…
Reference in New Issue