Merge tag 'amd-drm-next-5.15-2021-07-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.15-2021-07-29:

amdgpu:
- VCN/JPEG power down sequencing fixes
- Various navi pcie link handling fixes
- Clockgating fixes
- Yellow Carp fixes
- Beige Goby fixes
- Misc code cleanups
- S0ix fixes
- SMU i2c bus rework
- EEPROM handling rework
- PSP ucode handling cleanup
- SMU error handling rework
- AMD HDMI freesync fixes
- USB PD firmware update rework
- MMIO based vram access rework
- Misc display fixes
- Backlight fixes
- Add initial Cyan Skillfish support
- Overclocking fixes (suspend/resume)

amdkfd:
- Sysfs leak fix
- Add counters for vm faults and migration
- GPUVM TLB optimizations

radeon:
- Misc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210730033455.3852-1-alexander.deucher@amd.com
commit 04d505de7f
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -15481,6 +15481,8 @@ M:	Pan, Xinhui <Xinhui.Pan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git https://gitlab.freedesktop.org/agd5f/linux.git
+B:	https://gitlab.freedesktop.org/drm/amd/-/issues
+C:	irc://irc.oftc.net/radeon
 F:	drivers/gpu/drm/amd/
 F:	drivers/gpu/drm/radeon/
 F:	include/uapi/drm/amdgpu_drm.h
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -57,7 +57,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
-	amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
+	amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
+	amdgpu_eeprom.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
@@ -75,7 +76,7 @@ amdgpu-y += \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
 	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
-	beige_goby_reg_init.o yellow_carp_reg_init.o
+	beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
 
 # add DF block
 amdgpu-y += \
@@ -111,6 +112,7 @@ amdgpu-y += \
 	psp_v3_1.o \
 	psp_v10_0.o \
 	psp_v11_0.o \
+	psp_v11_0_8.o \
 	psp_v12_0.o \
 	psp_v13_0.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1108,8 +1108,13 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev);
 
 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
+void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
+			     void *buf, size_t size, bool write);
+size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
+				 void *buf, size_t size, bool write);
+
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
-			       uint32_t *buf, size_t size, bool write);
+			       void *buf, size_t size, bool write);
 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 			    uint32_t reg, uint32_t acc_flags);
 void amdgpu_device_wreg(struct amdgpu_device *adev,
@@ -1385,12 +1390,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -160,17 +160,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
 	return 0;
 }
 
-static struct device *get_mfd_cell_dev(const char *device_name, int r)
+static int acp_genpd_add_device(struct device *dev, void *data)
 {
-	char auto_dev_name[25];
-	struct device *dev;
+	struct generic_pm_domain *gpd = data;
+	int ret;
 
-	snprintf(auto_dev_name, sizeof(auto_dev_name),
-		 "%s.%d.auto", device_name, r);
-	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
-	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
+	ret = pm_genpd_add_device(gpd, dev);
+	if (ret)
+		dev_err(dev, "Failed to add dev to genpd %d\n", ret);
 
-	return dev;
+	return ret;
+}
+
+static int acp_genpd_remove_device(struct device *dev, void *data)
+{
+	int ret;
+
+	ret = pm_genpd_remove_device(dev);
+	if (ret)
+		dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
+
+	/* Continue to remove */
+	return 0;
 }
 
 /**
@@ -181,11 +192,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
  */
 static int acp_hw_init(void *handle)
 {
-	int r, i;
+	int r;
 	uint64_t acp_base;
 	u32 val = 0;
 	u32 count = 0;
-	struct device *dev;
 	struct i2s_platform_data *i2s_pdata = NULL;
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -341,15 +351,10 @@ static int acp_hw_init(void *handle)
 	if (r)
 		goto failure;
 
-	for (i = 0; i < ACP_DEVS ; i++) {
-		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
-		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
-		if (r) {
-			dev_err(dev, "Failed to add dev to genpd\n");
-			goto failure;
-		}
-	}
+	r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
+				  acp_genpd_add_device);
+	if (r)
+		goto failure;
 
 	/* Assert Soft reset of ACP */
 	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
@@ -410,10 +415,8 @@ failure:
  */
 static int acp_hw_fini(void *handle)
 {
-	int i, ret;
 	u32 val = 0;
 	u32 count = 0;
-	struct device *dev;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* return early if no ACP */
@@ -458,13 +461,8 @@ static int acp_hw_fini(void *handle)
 		udelay(100);
 	}
 
-	for (i = 0; i < ACP_DEVS ; i++) {
-		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
-		ret = pm_genpd_remove_device(dev);
-		/* If removal fails, dont giveup and try rest */
-		if (ret)
-			dev_err(dev, "remove dev from genpd failed\n");
-	}
+	device_for_each_child(adev->acp.parent, NULL,
+			      acp_genpd_remove_device);
 
 	mfd_remove_devices(adev->acp.parent);
 	kfree(adev->acp.acp_res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/power_supply.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include <acpi/video.h>
 #include <acpi/actbl.h>
 
@@ -853,8 +854,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 	if (amdgpu_device_has_dc_support(adev)) {
 #if defined(CONFIG_DRM_AMD_DC)
 		struct amdgpu_display_manager *dm = &adev->dm;
-		if (dm->backlight_dev)
-			atif->bd = dm->backlight_dev;
+		if (dm->backlight_dev[0])
+			atif->bd = dm->backlight_dev[0];
 #endif
 	} else {
 		struct drm_encoder *tmp;
@@ -1031,18 +1032,18 @@ void amdgpu_acpi_detect(void)
 }
 
 /**
- * amdgpu_acpi_is_s0ix_supported
+ * amdgpu_acpi_is_s0ix_active
 *
 * @adev: amdgpu_device_pointer
 *
 * returns true if supported, false if not.
 */
-bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
 		if (adev->flags & AMD_IS_APU)
-			return true;
+			return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
 	}
 #endif
 	return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -21,6 +21,7 @@
  */
 
 #include "amdgpu_amdkfd.h"
+#include "amd_pcie.h"
 #include "amd_shared.h"
 
 #include "amdgpu.h"
@@ -553,6 +554,88 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 	return (uint8_t)ret;
 }
 
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
+	int num_links;
+
+	if (adev->asic_type != CHIP_ALDEBARAN)
+		return 0;
+
+	if (src)
+		peer_adev = (struct amdgpu_device *)src;
+
+	/* num links returns 0 for indirect peers since indirect route is unknown. */
+	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
+	if (num_links < 0) {
+		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
+			  adev->gmc.xgmi.physical_node_id,
+			  peer_adev->gmc.xgmi.physical_node_id, num_links);
+		num_links = 0;
+	}
+
+	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
+	return (num_links * 16 * 25000)/BITS_PER_BYTE;
+}
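A quick sanity check of the formula above (an illustrative sketch, not part of the commit): with Aldebaran's xGMI DPM defeatured, the code assumes 16 lanes at 25000 Mbps per lane, so each direct link contributes (16 * 25000) / 8 = 50000 MB/s, and the is_min path always reports a single link's worth:

	/* Hypothetical standalone helper mirroring the expression above. */
	static int xgmi_link_bandwidth_mbytes(int num_links)
	{
		/* 16 lanes x 25000 Mbps per lane, Mbits converted to Mbytes */
		return (num_links * 16 * 25000) / 8;
	}
	/* xgmi_link_bandwidth_mbytes(1) == 50000, the is_min case;
	 * a two-link direct peer reports 100000.
	 */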
+
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)dev;
+	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
+					fls(adev->pm.pcie_mlw_mask)) - 1;
+	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
+					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
+					fls(adev->pm.pcie_gen_mask &
+					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
+	uint32_t num_lanes_mask = 1 << num_lanes_shift;
+	uint32_t gen_speed_mask = 1 << gen_speed_shift;
+	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
+
+	switch (num_lanes_mask) {
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
+		num_lanes_factor = 1;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
+		num_lanes_factor = 2;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
+		num_lanes_factor = 4;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
+		num_lanes_factor = 8;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
+		num_lanes_factor = 12;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
+		num_lanes_factor = 16;
+		break;
+	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
+		num_lanes_factor = 32;
+		break;
+	}
+
+	switch (gen_speed_mask) {
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
+		gen_speed_mbits_factor = 2500;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
+		gen_speed_mbits_factor = 5000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
+		gen_speed_mbits_factor = 8000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
+		gen_speed_mbits_factor = 16000;
+		break;
+	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
+		gen_speed_mbits_factor = 32000;
+		break;
+	}
+
+	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
+}
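The lane/gen selection above leans on ffs()/fls(): is_min takes the lowest set bit of each support mask, otherwise the highest. A worked example (hedged; the mask values are hypothetical): if pcie_mlw_mask advertises x8 and x16 while pcie_gen_mask advertises Gen3 and Gen4, the minimum is 8 lanes * 8000 Mbps / 8 = 8000 MB/s and the maximum is 16 lanes * 16000 Mbps / 8 = 32000 MB/s:

	#include <strings.h>	/* userspace ffs(); the kernel has its own */

	/* Sketch of the min/max bit selection used by the function above. */
	static unsigned int pick_support_bit(unsigned int mask, int want_min)
	{
		int shift = (want_min ? ffs(mask) : 32 - __builtin_clz(mask)) - 1;
		return 1u << shift;	/* isolate lowest or highest set bit */
	}
	/* pick_support_bit(0x6, 1) == 0x2, pick_support_bit(0x6, 0) == 0x4 */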
 
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -226,6 +226,8 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
 int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
 
 /* Read user wptr from a specified user address space with page fault
  * disabled. The memory must be pinned and mapped to the hardware when
@@ -330,7 +332,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
 #else
 static inline int kgd2kfd_init(void)
 {
@@ -389,7 +391,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
 }
 
 static inline
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
 {
 }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1285,11 +1285,22 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 	if (avm->process_info)
 		return -EINVAL;
 
+	/* Free the original amdgpu allocated pasid,
+	 * will be replaced with kfd allocated pasid.
+	 */
+	if (avm->pasid) {
+		amdgpu_pasid_free(avm->pasid);
+		amdgpu_vm_set_pasid(adev, avm, 0);
+	}
+
 	/* Convert VM into a compute VM */
-	ret = amdgpu_vm_make_compute(adev, avm, pasid);
+	ret = amdgpu_vm_make_compute(adev, avm);
 	if (ret)
 		return ret;
 
+	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
+	if (ret)
+		return ret;
 	/* Initialize KFD part of the VM and process info */
 	ret = init_kfd_vm(avm, process_info, ef);
 	if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -116,6 +116,7 @@ const char *amdgpu_asic_name[] = {
 	"RENOIR",
 	"ALDEBARAN",
 	"NAVI10",
+	"CYAN_SKILLFISH",
 	"NAVI14",
 	"NAVI12",
 	"SIENNA_CICHLID",
@@ -287,7 +288,7 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
  */
 
 /**
- * amdgpu_device_vram_access - read/write a buffer in vram
+ * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
@@ -295,22 +296,65 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
 * @size: read/write size, sizeof(@buf) must > @size
 * @write: true - write to vram, otherwise - read from vram
 */
-void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
-			       uint32_t *buf, size_t size, bool write)
+void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
+			     void *buf, size_t size, bool write)
 {
 	unsigned long flags;
-	uint32_t hi = ~0;
+	uint32_t hi = ~0, tmp = 0;
+	uint32_t *data = buf;
 	uint64_t last;
 	int idx;
 
 	if (!drm_dev_enter(&adev->ddev, &idx))
 		return;
 
+	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
+
+	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+	for (last = pos + size; pos < last; pos += 4) {
+		tmp = pos >> 31;
+
+		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+		if (tmp != hi) {
+			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+			hi = tmp;
+		}
+		if (write)
+			WREG32_NO_KIQ(mmMM_DATA, *data++);
+		else
+			*data++ = RREG32_NO_KIQ(mmMM_DATA);
+	}
+
+	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+	drm_dev_exit(idx);
+}
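For readers unfamiliar with the MM_INDEX/MM_DATA window used above, a small sketch of the address split (illustrative only, not part of the diff): the low 31 bits of the byte offset go into MM_INDEX with bit 31 set to flag the access, and the remaining bits go into MM_INDEX_HI. The hi/tmp comparison means MM_INDEX_HI is only rewritten when the loop crosses a 2 GB boundary:

	#include <stdint.h>

	/* Hypothetical helper showing how one dword access is addressed. */
	static void split_mm_offset(uint64_t pos, uint32_t *low, uint32_t *high)
	{
		*low  = (uint32_t)pos | 0x80000000;	/* offset bits 30:0 plus the flag bit */
		*high = (uint32_t)(pos >> 31);		/* offset bits above 2 GB */
	}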
+/**
+ * amdgpu_device_vram_access - access vram by vram aperature
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must > @size
+ * @write: true - write to vram, otherwise - read from vram
+ *
+ * The return value means how many bytes have been transferred.
+ */
+size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
+				 void *buf, size_t size, bool write)
+{
 #ifdef CONFIG_64BIT
+	void __iomem *addr;
+	size_t count = 0;
+	uint64_t last;
+
+	if (!adev->mman.aper_base_kaddr)
+		return 0;
+
 	last = min(pos + size, adev->gmc.visible_vram_size);
 	if (last > pos) {
-		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
-		size_t count = last - pos;
+		addr = adev->mman.aper_base_kaddr + pos;
+		count = last - pos;
 
 		if (write) {
 			memcpy_toio(addr, buf, count);
@@ -322,35 +366,37 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 			memcpy_fromio(buf, addr, count);
 		}
 
-		if (count == size)
-			goto exit;
-
-		pos += count;
-		buf += count / 4;
-		size -= count;
 	}
+
+	return count;
+#else
+	return 0;
 #endif
+}
 
-	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-	for (last = pos + size; pos < last; pos += 4) {
-		uint32_t tmp = pos >> 31;
-
-		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
-		if (tmp != hi) {
-			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
-			hi = tmp;
-		}
-		if (write)
-			WREG32_NO_KIQ(mmMM_DATA, *buf++);
-		else
-			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
-	}
-	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-#ifdef CONFIG_64BIT
-exit:
-#endif
-	drm_dev_exit(idx);
+/**
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must > @size
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+			       void *buf, size_t size, bool write)
+{
+	size_t count;
+
+	/* try to using vram apreature to access vram first */
+	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+	size -= count;
+	if (size) {
+		/* using MM to access rest vram */
+		pos += count;
+		buf += count;
+		amdgpu_device_mm_access(adev, pos, buf, size, write);
+	}
 }
 
 /*
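Putting the split together (a usage sketch under the assumption that the buffer straddles the end of CPU-visible VRAM; the caller function is hypothetical): amdgpu_device_aper_access() serves as much as the BAR aperture can reach via memcpy_toio()/memcpy_fromio(), and amdgpu_device_mm_access() mops up the remainder through the MM register window:

	/* Read 8 KiB beginning 4 KiB before the end of visible VRAM. */
	static void read_straddling_buffer(struct amdgpu_device *adev, void *buf)
	{
		loff_t pos = adev->gmc.visible_vram_size - 4096;

		/* first 4 KiB via the aperture, last 4 KiB via MM_INDEX/MM_DATA */
		amdgpu_device_vram_access(adev, pos, buf, 8192, false);
	}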
@@ -518,7 +564,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 	    adev->gfx.rlc.funcs &&
 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
+			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
 	} else {
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	}
@@ -1395,6 +1441,10 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
 		break;
 	case CHIP_YELLOW_CARP:
 		break;
+	case CHIP_CYAN_SKILLFISH:
+		if (adev->pdev->device == 0x13FE)
+			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2101,6 +2151,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
 	case CHIP_YELLOW_CARP:
+	case CHIP_CYAN_SKILLFISH:
 		if (adev->asic_type == CHIP_VANGOGH)
 			adev->family = AMDGPU_FAMILY_VGH;
 		else if (adev->asic_type == CHIP_YELLOW_CARP)
@@ -3505,13 +3556,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_device_get_job_timeout_settings(adev);
 	if (r) {
 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		goto failed_unmap;
+		return r;
 	}
 
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
-		goto failed_unmap;
+		return r;
 
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
@@ -3625,6 +3676,8 @@ fence_driver_init:
 			goto release_ras_con;
 	}
 
+	amdgpu_fence_driver_hw_init(adev);
+
 	dev_info(adev->dev,
 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
 			adev->gfx.config.max_shader_engines,
@@ -3737,10 +3790,6 @@ release_ras_con:
 failed:
 	amdgpu_vf_error_trans_all(adev);
 
-failed_unmap:
-	iounmap(adev->rmmio);
-	adev->rmmio = NULL;
-
 	return r;
 }
 
@@ -3796,7 +3845,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 		else
 			drm_atomic_helper_shutdown(adev_to_drm(adev));
 	}
-	amdgpu_fence_driver_fini_hw(adev);
+	amdgpu_fence_driver_hw_fini(adev);
 
 	if (adev->pm_sysfs_en)
 		amdgpu_pm_sysfs_fini(adev);
@@ -3818,7 +3867,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
 	amdgpu_device_ip_fini(adev);
-	amdgpu_fence_driver_fini_sw(adev);
+	amdgpu_fence_driver_sw_fini(adev);
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
@@ -3893,7 +3942,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	amdgpu_fence_driver_suspend(adev);
+	amdgpu_fence_driver_hw_fini(adev);
 
 	amdgpu_device_ip_suspend_phase2(adev);
 	/* evict remaining vram memory
@@ -3938,7 +3987,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
 		return r;
 	}
-	amdgpu_fence_driver_resume(adev);
+	amdgpu_fence_driver_hw_init(adev);
 
 
 	r = amdgpu_device_ip_late_init(adev);
@@ -4428,7 +4477,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
-	if(job)
+	if (job && job->vm)
 		drm_sched_increase_karma(&job->base);
 
 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
@@ -4892,7 +4941,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 			job ? job->base.id : -1, hive->hive_id);
 		amdgpu_put_xgmi_hive(hive);
-		if (job)
+		if (job && job->vm)
 			drm_sched_increase_karma(&job->base);
 		return 0;
 	}
@@ -4916,7 +4965,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 					job ? job->base.id : -1);
 
 		/* even we skipped this reset, still need to set the job to guilty */
-		if (job)
+		if (job && job->vm)
 			drm_sched_increase_karma(&job->base);
 		goto skip_recovery;
 	}
@@ -5283,6 +5332,10 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	    adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
+	if (amdgpu_passthrough(adev) &&
+	    adev->nbio.funcs->clear_doorbell_interrupt)
+		adev->nbio.funcs->clear_doorbell_interrupt(adev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -870,11 +870,10 @@ MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legac
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);
 
 /**
- * DOC: bad_page_threshold (int)
- * Bad page threshold is to specify the threshold value of faulty pages
- * detected by RAS ECC, that may result in GPU entering bad status if total
- * faulty pages by ECC exceed threshold value and leave it for user's further
- * check.
+ * DOC: bad_page_threshold (int) Bad page threshold is specifies the
+ * threshold value of faulty pages detected by RAS ECC, which may
+ * result in the GPU entering bad status when the number of total
+ * faulty pages by ECC exceeds the threshold value.
 */
 MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
 module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
@@ -1213,6 +1212,9 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
 	{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
 
+	/* CYAN_SKILLFISH */
+	{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+
 	{0, 0, 0}
 };
 
@@ -1467,7 +1469,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 	int r;
 
-	if (amdgpu_acpi_is_s0ix_supported(adev))
+	if (amdgpu_acpi_is_s0ix_active(adev))
 		adev->in_s0ix = true;
 	adev->in_s3 = true;
 	r = amdgpu_device_suspend(drm_dev, true);
@@ -1483,7 +1485,7 @@ static int amdgpu_pmops_resume(struct device *dev)
 	int r;
 
 	r = amdgpu_device_resume(drm_dev, true);
-	if (amdgpu_acpi_is_s0ix_supported(adev))
+	if (amdgpu_acpi_is_s0ix_active(adev))
 		adev->in_s0ix = false;
 	return r;
 }
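Taken together with the amdgpu_acpi.c hunk earlier, the rename makes the condition the suspend path actually checks explicit. A composite sketch of that check (illustrative only, not verbatim kernel code):

	/* s0ix is only considered active when the platform advertises
	 * low-power S0, the device is an APU, and the current suspend
	 * really targets suspend-to-idle.
	 */
	static bool s0ix_active_sketch(struct amdgpu_device *adev)
	{
		return (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) &&
		       (adev->flags & AMD_IS_APU) &&
		       pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
	}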
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
new file
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_eeprom.h"
+#include "amdgpu.h"
+
+/* AT24CM02 and M24M02-R have a 256-byte write page size.
+ */
+#define EEPROM_PAGE_BITS   8
+#define EEPROM_PAGE_SIZE   (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK   (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+/* EEPROM memory addresses are 19-bits long, which can
+ * be partitioned into 3, 8, 8 bits, for a total of 19.
+ * The upper 3 bits are sent as part of the 7-bit
+ * "Device Type Identifier"--an I2C concept, which for EEPROM devices
+ * is hard-coded as 1010b, indicating that it is an EEPROM
+ * device--this is the wire format, followed by the upper
+ * 3 bits of the 19-bit address, followed by the direction,
+ * followed by two bytes holding the rest of the 16-bits of
+ * the EEPROM memory address. The format on the wire for EEPROM
+ * devices is: 1010XYZD, A15:A8, A7:A0,
+ * Where D is the direction and sequenced out by the hardware.
+ * Bits XYZ are memory address bits 18, 17 and 16.
+ * These bits are compared to how pins 1-3 of the part are connected,
+ * depending on the size of the part, more on that later.
+ *
+ * Note that of this wire format, a client is in control
+ * of, and needs to specify only XYZ, A15:A8, A7:0, bits,
+ * which is exactly the EEPROM memory address, or offset,
+ * in order to address up to 8 EEPROM devices on the I2C bus.
+ *
+ * For instance, a 2-Mbit I2C EEPROM part, addresses all its bytes,
+ * using an 18-bit address, bit 17 to 0 and thus would use all but one bit of
+ * the 19 bits previously mentioned. The designer would then not connect
+ * pins 1 and 2, and pin 3 usually named "A_2" or "E2", would be connected to
+ * either Vcc or GND. This would allow for up to two 2-Mbit parts on
+ * the same bus, where one would be addressable with bit 18 as 1, and
+ * the other with bit 18 of the address as 0.
+ *
+ * For a 2-Mbit part, bit 18 is usually known as the "Chip Enable" or
+ * "Hardware Address Bit". This bit is compared to the load on pin 3
+ * of the device, described above, and if there is a match, then this
+ * device responds to the command. This way, you can connect two
+ * 2-Mbit EEPROM devices on the same bus, but see one contiguous
+ * memory from 0 to 7FFFFh, where address 0 to 3FFFF is in the device
+ * whose pin 3 is connected to GND, and address 40000 to 7FFFFh is in
+ * the 2nd device, whose pin 3 is connected to Vcc.
+ *
+ * This addressing you encode in the 32-bit "eeprom_addr" below,
+ * namely the 19-bits "XYZ,A15:A0", as a single 19-bit address. For
+ * instance, eeprom_addr = 0x6DA01, is 110_1101_1010_0000_0001, where
+ * XYZ=110b, and A15:A0=DA01h. The XYZ bits become part of the device
+ * address, and the rest of the address bits are sent as the memory
+ * address bytes.
+ *
+ * That is, for an I2C EEPROM driver everything is controlled by
+ * the "eeprom_addr".
+ *
+ * P.S. If you need to write, lock and read the Identification Page,
+ * (M24M02-DR device only, which we do not use), change the "7" to
+ * "0xF" in the macro below, and let the client set bit 20 to 1 in
+ * "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
+ * 1 to lock it permanently.
+ */
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))
+
+static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+				u8 *eeprom_buf, u16 buf_size, bool read)
+{
+	u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+	struct i2c_msg msgs[] = {
+		{
+			.flags = 0,
+			.len = EEPROM_OFFSET_SIZE,
+			.buf = eeprom_offset_buf,
+		},
+		{
+			.flags = read ? I2C_M_RD : 0,
+		},
+	};
+	const u8 *p = eeprom_buf;
+	int r;
+	u16 len;
+
+	for (r = 0; buf_size > 0;
+	     buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+		/* Set the EEPROM address we want to write to/read from.
+		 */
+		msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+		msgs[1].addr = msgs[0].addr;
+		msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+		msgs[0].buf[1] = eeprom_addr & 0xff;
+
+		if (!read) {
+			/* Write the maximum amount of data, without
+			 * crossing the device's page boundary, as per
+			 * its spec. Partial page writes are allowed,
+			 * starting at any location within the page,
+			 * so long as the page boundary isn't crossed
+			 * over (actually the page pointer rolls
+			 * over).
+			 *
+			 * As per the AT24CM02 EEPROM spec, after
+			 * writing into a page, the I2C driver should
+			 * terminate the transfer, i.e. in
+			 * "i2c_transfer()" below, with a STOP
+			 * condition, so that the self-timed write
+			 * cycle begins. This is implied for the
+			 * "i2c_transfer()" abstraction.
+			 */
+			len = min(EEPROM_PAGE_SIZE - (eeprom_addr &
+						      EEPROM_PAGE_MASK),
+				  (u32)buf_size);
+		} else {
+			/* Reading from the EEPROM has no limitation
+			 * on the number of bytes read from the EEPROM
+			 * device--they are simply sequenced out.
+			 */
+			len = buf_size;
+		}
+		msgs[1].len = len;
+		msgs[1].buf = eeprom_buf;
+
+		/* This constitutes a START-STOP transaction.
+		 */
+		r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+		if (r != ARRAY_SIZE(msgs))
+			break;
+
+		if (!read) {
+			/* According to EEPROM specs the length of the
+			 * self-writing cycle, tWR (tW), is 10 ms.
+			 *
+			 * TODO: Use polling on ACK, aka Acknowledge
+			 * Polling, to minimize waiting for the
+			 * internal write cycle to complete, as it is
+			 * usually smaller than tWR (tW).
+			 */
+			msleep(10);
+		}
+	}
+
+	return r < 0 ? r : eeprom_buf - p;
+}
+
+/**
+ * amdgpu_eeprom_xfer -- Read/write from/to an I2C EEPROM device
+ * @i2c_adap: pointer to the I2C adapter to use
+ * @eeprom_addr: EEPROM address from which to read/write
+ * @eeprom_buf: pointer to data buffer to read into/write from
+ * @buf_size: the size of @eeprom_buf
+ * @read: True if reading from the EEPROM, false if writing
+ *
+ * Returns the number of bytes read/written; -errno on error.
+ */
+static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+			      u8 *eeprom_buf, u16 buf_size, bool read)
+{
+	const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
+	u16 limit;
+
+	if (!quirks)
+		limit = 0;
+	else if (read)
+		limit = quirks->max_read_len;
+	else
+		limit = quirks->max_write_len;
+
+	if (limit == 0) {
+		return __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+					    eeprom_buf, buf_size, read);
+	} else if (limit <= EEPROM_OFFSET_SIZE) {
+		dev_err_ratelimited(&i2c_adap->dev,
+				    "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+				    eeprom_addr, buf_size,
+				    read ? "read" : "write", EEPROM_OFFSET_SIZE);
+		return -EINVAL;
+	} else {
+		u16 ps; /* Partial size */
+		int res = 0, r;
+
+		/* The "limit" includes all data bytes sent/received,
+		 * which would include the EEPROM_OFFSET_SIZE bytes.
+		 * Account for them here.
+		 */
+		limit -= EEPROM_OFFSET_SIZE;
+		for ( ; buf_size > 0;
+		      buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+			ps = min(limit, buf_size);
+
+			r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+						 eeprom_buf, ps, read);
+			if (r < 0)
+				return r;
+			res += r;
+		}
+
+		return res;
+	}
+}
+
+int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+		       u32 eeprom_addr, u8 *eeprom_buf,
+		       u16 bytes)
+{
+	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+				  true);
+}
+
+int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+			u32 eeprom_addr, u8 *eeprom_buf,
+			u16 bytes)
+{
+	return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+				  false);
+}
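To make the comment's own example concrete (a worked sketch, not part of the new file): eeprom_addr = 0x6DA01 decodes as XYZ = 110b and A15:A0 = 0xDA01, so MAKE_I2C_ADDR() yields device address 0x56 and the two offset bytes are 0xDA, 0x01:

	uint32_t eeprom_addr = 0x6DA01;
	uint8_t dev_addr = (0xA << 3) | ((eeprom_addr >> 16) & 7);	/* 1010_110b = 0x56 */
	uint8_t offs_hi  = (eeprom_addr >> 8) & 0xff;			/* 0xDA -> msgs[0].buf[0] */
	uint8_t offs_lo  = eeprom_addr & 0xff;				/* 0x01 -> msgs[0].buf[1] */

The page-boundary clamp works the same way: a write starting at 0x6DA01 may carry at most EEPROM_PAGE_SIZE - (0x6DA01 & 0xFF) = 256 - 1 = 255 data bytes before __amdgpu_eeprom_xfer() issues the STOP and starts the next chunk.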
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
new file
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_EEPROM_H
+#define _AMDGPU_EEPROM_H
+
+#include <linux/i2c.h>
+
+int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+		       u32 eeprom_addr, u8 *eeprom_buf,
+		       u16 bytes);
+
+int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+			u32 eeprom_addr, u8 *eeprom_buf,
+			u16 bytes);
+
+#endif
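A minimal usage sketch of the new interface (hypothetical caller; the smu_i2c adapter and the 0x60000 FRU base address come from the amdgpu_fru_eeprom.c hunk below):

	u8 buf[32];
	int r;

	/* read 32 bytes starting at the FRU region's manufacturer field */
	r = amdgpu_eeprom_read(&adev->pm.smu_i2c, 0x60000 + 0xb, buf, sizeof(buf));
	if (r < 0)
		DRM_ERROR("EEPROM read failed: %d\n", r);
	else
		DRM_INFO("transferred %d bytes\n", r);	/* r = bytes moved */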
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -417,9 +417,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 
-	if (irq_src)
-		amdgpu_irq_get(adev, irq_src, irq_type);
-
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
@@ -525,7 +522,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
@@ -553,7 +550,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
 	}
 }
 
-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 {
 	unsigned int i, j;
 
@@ -572,49 +569,18 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_fence_driver_suspend - suspend the fence driver
+ * amdgpu_fence_driver_hw_init - enable the fence driver
+ * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
- * Suspend the fence driver for all possible rings (all asics).
- */
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
-{
-	int i, r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring || !ring->fence_drv.initialized)
-			continue;
-
-		/* wait for gpu to finish processing current batch */
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			amdgpu_fence_driver_force_completion(ring);
-		}
-
-		/* disable the interrupt */
-		if (ring->fence_drv.irq_src)
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-	}
-}
-
-/**
- * amdgpu_fence_driver_resume - resume the fence driver
- * for all possible rings.
- *
- * @adev: amdgpu device pointer
- *
- * Resume the fence driver for all possible rings (all asics).
+ * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 {
 	int i;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -27,10 +27,10 @@
 #include "smu_v11_0_i2c.h"
 #include "atom.h"
 #include "amdgpu_fru_eeprom.h"
+#include "amdgpu_eeprom.h"
 
-#define I2C_PRODUCT_INFO_ADDR		0xAC
-#define I2C_PRODUCT_INFO_ADDR_SIZE	0x2
-#define I2C_PRODUCT_INFO_OFFSET		0xC0
+#define FRU_EEPROM_MADDR	0x60000
+#define I2C_PRODUCT_INFO_OFFSET	0xC0
 
 static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 {
@@ -62,19 +62,11 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 }
 
 static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
-			   unsigned char *buff)
+				  unsigned char *buff)
 {
 	int ret, size;
-	struct i2c_msg msg = {
-		.addr  = I2C_PRODUCT_INFO_ADDR,
-		.flags = I2C_M_RD,
-		.buf   = buff,
-	};
-	buff[0] = 0;
-	buff[1] = addrptr;
-	msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
-	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
 
+	ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1);
 	if (ret < 1) {
 		DRM_WARN("FRU: Failed to get size field");
 		return ret;
@@ -83,13 +75,9 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 	/* The size returned by the i2c requires subtraction of 0xC0 since the
 	 * size apparently always reports as 0xC0+actual size.
 	 */
-	size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
-	/* Add 1 since address field was 1 byte */
-	buff[1] = addrptr + 1;
-
-	msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
-	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+	size = buff[0] - I2C_PRODUCT_INFO_OFFSET;
 
+	ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size);
 	if (ret < 1) {
 		DRM_WARN("FRU: Failed to get data field");
 		return ret;
@@ -101,8 +89,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
 	unsigned char buff[34];
-	int addrptr, size;
-	int len;
+	u32 addrptr;
+	int size, len;
 
 	if (!is_fru_eeprom_supported(adev))
 		return 0;
@@ -125,7 +113,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	 * Bytes 8-a are all 1-byte and refer to the size of the entire struct,
 	 * and the language field, so just start from 0xb, manufacturer size
 	 */
-	addrptr = 0xb;
+	addrptr = FRU_EEPROM_MADDR + 0xb;
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -584,7 +584,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
 		amdgpu_restore_msix(adev);
 
 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -374,8 +374,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
 		break;
 	case AMDGPU_INFO_FW_SOS:
-		fw_info->ver = adev->psp.sos_fw_version;
-		fw_info->feature = adev->psp.sos_feature_version;
+		fw_info->ver = adev->psp.sos.fw_version;
+		fw_info->feature = adev->psp.sos.feature_version;
 		break;
 	case AMDGPU_INFO_FW_ASD:
 		fw_info->ver = adev->psp.asd_fw_version;
@@ -390,8 +390,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->feature = 0;
 		break;
 	case AMDGPU_INFO_FW_TOC:
-		fw_info->ver = adev->psp.toc_fw_version;
-		fw_info->feature = adev->psp.toc_feature_version;
+		fw_info->ver = adev->psp.toc.fw_version;
+		fw_info->feature = adev->psp.toc.feature_version;
 		break;
 	default:
 		return -EINVAL;
@@ -1179,10 +1179,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		pasid = 0;
 	}
 
-	r = amdgpu_vm_init(adev, &fpriv->vm, pasid);
+	r = amdgpu_vm_init(adev, &fpriv->vm);
 	if (r)
 		goto error_pasid;
 
+	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
+	if (r)
+		goto error_vm;
+
 	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
 	if (!fpriv->prt_va) {
 		r = -ENOMEM;
@@ -1210,8 +1214,10 @@ error_vm:
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 error_pasid:
-	if (pasid)
+	if (pasid) {
 		amdgpu_pasid_free(pasid);
+		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
+	}
 
 	kfree(fpriv);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -95,6 +95,7 @@ struct amdgpu_nbio_funcs {
 	void (*program_aspm)(struct amdgpu_device *adev);
 	void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
 	void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
+	void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -731,7 +731,7 @@ retry:
 /**
 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
 *
- * @bo: BO that will be inserted into the shadow list
+ * @vmbo: BO that will be inserted into the shadow list
 *
 * Insert a BO to the shadow list.
 */
@ -24,7 +24,6 @@
|
|||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <drm/drm_drv.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
|
@ -34,6 +33,7 @@
|
|||
#include "psp_v3_1.h"
|
||||
#include "psp_v10_0.h"
|
||||
#include "psp_v11_0.h"
|
||||
#include "psp_v11_0_8.h"
|
||||
#include "psp_v12_0.h"
|
||||
#include "psp_v13_0.h"
|
||||
|
||||
|
@ -122,6 +122,12 @@ static int psp_early_init(void *handle)
|
|||
psp_v13_0_set_psp_funcs(psp);
|
||||
psp->autoload_supported = true;
|
||||
break;
|
||||
case CHIP_CYAN_SKILLFISH:
|
||||
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
|
||||
psp_v11_0_8_set_psp_funcs(psp);
|
||||
psp->autoload_supported = false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -356,6 +362,44 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
|
|||
return -ETIME;
|
||||
}
|
||||
|
||||
static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
|
||||
{
|
||||
switch (cmd_id) {
|
||||
case GFX_CMD_ID_LOAD_TA:
|
||||
return "LOAD_TA";
|
||||
case GFX_CMD_ID_UNLOAD_TA:
|
||||
return "UNLOAD_TA";
|
||||
case GFX_CMD_ID_INVOKE_CMD:
|
||||
return "INVOKE_CMD";
|
||||
case GFX_CMD_ID_LOAD_ASD:
|
||||
return "LOAD_ASD";
|
||||
case GFX_CMD_ID_SETUP_TMR:
|
||||
return "SETUP_TMR";
|
||||
case GFX_CMD_ID_LOAD_IP_FW:
|
||||
return "LOAD_IP_FW";
|
||||
case GFX_CMD_ID_DESTROY_TMR:
|
||||
return "DESTROY_TMR";
|
||||
case GFX_CMD_ID_SAVE_RESTORE:
|
||||
return "SAVE_RESTORE_IP_FW";
|
||||
case GFX_CMD_ID_SETUP_VMR:
|
||||
return "SETUP_VMR";
|
||||
case GFX_CMD_ID_DESTROY_VMR:
|
||||
return "DESTROY_VMR";
|
||||
case GFX_CMD_ID_PROG_REG:
|
||||
return "PROG_REG";
|
||||
case GFX_CMD_ID_GET_FW_ATTESTATION:
|
||||
return "GET_FW_ATTESTATION";
|
||||
case GFX_CMD_ID_LOAD_TOC:
|
||||
return "ID_LOAD_TOC";
|
||||
case GFX_CMD_ID_AUTOLOAD_RLC:
|
||||
return "AUTOLOAD_RLC";
|
||||
case GFX_CMD_ID_BOOT_CFG:
|
||||
return "BOOT_CFG";
|
||||
default:
|
||||
return "UNKNOWN CMD";
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
psp_cmd_submit_buf(struct psp_context *psp,
|
||||
struct amdgpu_firmware_info *ucode,
|
||||
|
@ -417,10 +461,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
|
|||
*/
|
||||
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
|
||||
if (ucode)
|
||||
DRM_WARN("failed to load ucode id (%d) ",
|
||||
ucode->ucode_id);
|
||||
DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
|
||||
psp->cmd_buf_mem->cmd_id,
|
||||
DRM_WARN("failed to load ucode (%s) ",
|
||||
amdgpu_ucode_name(ucode->ucode_id));
|
||||
DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
|
||||
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
|
||||
psp->cmd_buf_mem->resp.status);
|
||||
if (!timeout) {
|
||||
ret = -EINVAL;
|
||||
|
@ -479,9 +523,9 @@ static int psp_load_toc(struct psp_context *psp,
|
|||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
/* Copy toc to psp firmware private buffer */
|
||||
psp_copy_fw(psp, psp->toc_start_addr, psp->toc_bin_size);
|
||||
psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
|
||||
|
||||
psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
|
||||
psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
|
||||
|
||||
ret = psp_cmd_submit_buf(psp, NULL, cmd,
|
||||
psp->fence_buf_mc_addr);
|
||||
|
@ -511,8 +555,8 @@ static int psp_tmr_init(struct psp_context *psp)
|
|||
/* For ASICs support RLC autoload, psp will parse the toc
|
||||
* and calculate the total size of TMR needed */
|
||||
if (!amdgpu_sriov_vf(psp->adev) &&
|
||||
psp->toc_start_addr &&
|
||||
psp->toc_bin_size &&
|
||||
psp->toc.start_addr &&
|
||||
psp->toc.size_bytes &&
|
||||
psp->fw_pri_buf) {
|
||||
ret = psp_load_toc(psp, &tmr_size);
|
||||
if (ret) {
|
||||
|
@ -691,18 +735,18 @@ static int psp_rl_load(struct amdgpu_device *adev)
|
|||
struct psp_context *psp = &adev->psp;
|
||||
struct psp_gfx_cmd_resp *cmd = psp->cmd;
|
||||
|
||||
if (psp->rl_bin_size == 0)
|
||||
if (!is_psp_fw_valid(psp->rl))
|
||||
return 0;
|
||||
|
||||
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
|
||||
memcpy(psp->fw_pri_buf, psp->rl_start_addr, psp->rl_bin_size);
|
||||
memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
|
||||
|
||||
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
|
||||
|
||||
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
|
||||
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
|
||||
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
|
||||
cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl_bin_size;
|
||||
cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
|
||||
cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
|
||||
|
||||
return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
|
||||
|
@ -1041,6 +1085,12 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
|
||||
{
|
||||
return psp->adev->asic_type == CHIP_ALDEBARAN &&
|
||||
psp->ta_xgmi_ucode_version >= 0x2000000b;
|
||||
}
|
||||
|
||||
int psp_xgmi_get_topology_info(struct psp_context *psp,
|
||||
int number_devices,
|
||||
struct psp_xgmi_topology_info *topology)
|
||||
|
@ -1084,6 +1134,23 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
|
|||
topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
|
||||
}
|
||||
|
||||
/* Invoke xgmi ta again to get the link information */
|
||||
if (psp_xgmi_peer_link_info_supported(psp)) {
|
||||
struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
|
||||
|
||||
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
|
||||
|
||||
ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
|
||||
for (i = 0; i < topology->num_nodes; i++)
|
||||
topology->nodes[i].num_links =
|
||||
link_info_output->nodes[i].num_links;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2069,7 +2136,7 @@ static int psp_hw_start(struct psp_context *psp)
|
|||
int ret;
|
||||
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
if (psp->kdb_bin_size &&
|
||||
if ((is_psp_fw_valid(psp->kdb)) &&
|
||||
(psp->funcs->bootloader_load_kdb != NULL)) {
|
||||
ret = psp_bootloader_load_kdb(psp);
|
||||
if (ret) {
|
||||
|
@ -2078,7 +2145,8 @@ static int psp_hw_start(struct psp_context *psp)
|
|||
}
|
||||
}
|
||||
|
||||
if (psp->spl_bin_size) {
|
||||
if ((is_psp_fw_valid(psp->spl)) &&
|
||||
(psp->funcs->bootloader_load_spl != NULL)) {
|
||||
ret = psp_bootloader_load_spl(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load spl failed!\n");
|
||||
|
@ -2086,16 +2154,49 @@ static int psp_hw_start(struct psp_context *psp)
|
|||
}
|
||||
}
|
||||
|
||||
ret = psp_bootloader_load_sysdrv(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sysdrv failed!\n");
|
||||
return ret;
|
||||
if ((is_psp_fw_valid(psp->sys)) &&
|
||||
(psp->funcs->bootloader_load_sysdrv != NULL)) {
|
||||
ret = psp_bootloader_load_sysdrv(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sys drv failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = psp_bootloader_load_sos(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sos failed!\n");
|
||||
return ret;
|
||||
if ((is_psp_fw_valid(psp->soc_drv)) &&
|
||||
(psp->funcs->bootloader_load_soc_drv != NULL)) {
|
||||
ret = psp_bootloader_load_soc_drv(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load soc drv failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if ((is_psp_fw_valid(psp->intf_drv)) &&
|
||||
(psp->funcs->bootloader_load_intf_drv != NULL)) {
|
||||
ret = psp_bootloader_load_intf_drv(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load intf drv failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if ((is_psp_fw_valid(psp->dbg_drv)) &&
|
||||
(psp->funcs->bootloader_load_dbg_drv != NULL)) {
|
||||
ret = psp_bootloader_load_dbg_drv(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load dbg drv failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if ((is_psp_fw_valid(psp->sos)) &&
|
||||
(psp->funcs->bootloader_load_sos != NULL)) {
|
||||
ret = psp_bootloader_load_sos(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sos failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -2313,7 +2414,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
	return ret;
 }

-static int psp_execute_np_fw_load(struct psp_context *psp,
+static int psp_execute_non_psp_fw_load(struct psp_context *psp,
			  struct amdgpu_firmware_info *ucode)
 {
	int ret = 0;

@@ -2349,7 +2450,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
		}
	}

-	ret = psp_execute_np_fw_load(psp, ucode);
+	ret = psp_execute_non_psp_fw_load(psp, ucode);

	if (ret)
		DRM_ERROR("PSP load smu failed!\n");

@@ -2404,14 +2505,14 @@ int psp_load_fw_list(struct psp_context *psp,
	for (i = 0; i < ucode_count; ++i) {
		ucode = ucode_list[i];
		psp_print_fw_hdr(psp, ucode);
-		ret = psp_execute_np_fw_load(psp, ucode);
+		ret = psp_execute_non_psp_fw_load(psp, ucode);
		if (ret)
			return ret;
	}
	return ret;
 }

-static int psp_np_fw_load(struct psp_context *psp)
+static int psp_load_non_psp_fw(struct psp_context *psp)
 {
	int i, ret;
	struct amdgpu_firmware_info *ucode;

@@ -2450,7 +2551,7 @@ static int psp_np_fw_load(struct psp_context *psp)

		psp_print_fw_hdr(psp, ucode);

-		ret = psp_execute_np_fw_load(psp, ucode);
+		ret = psp_execute_non_psp_fw_load(psp, ucode);
		if (ret)
			return ret;

@@ -2527,7 +2628,7 @@ skip_memalloc:
	if (ret)
		goto failed;

-	ret = psp_np_fw_load(psp);
+	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed;

@@ -2727,7 +2828,7 @@ static int psp_resume(void *handle)
	if (ret)
		goto failed;

-	ret = psp_np_fw_load(psp);
+	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed;

@@ -2825,7 +2926,7 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
	ucode.mc_addr = cmd_gpu_addr;
	ucode.ucode_size = cmd_size;

-	return psp_execute_np_fw_load(&adev->psp, &ucode);
+	return psp_execute_non_psp_fw_load(&adev->psp, &ucode);
 }

 int psp_ring_cmd_submit(struct psp_context *psp,

@@ -2918,7 +3019,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
			   const char *chip_name)
 {
	struct amdgpu_device *adev = psp->adev;
-	char fw_name[30];
+	char fw_name[PSP_FW_NAME_LEN];
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;

@@ -2937,10 +3038,10 @@ int psp_init_toc_microcode(struct psp_context *psp,
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
-	adev->psp.toc_fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
-	adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
-	adev->psp.toc_bin_size = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
-	adev->psp.toc_start_addr = (uint8_t *)toc_hdr +
+	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
+	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
+	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
+	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
 out:

@@ -2950,6 +3051,82 @@ out:
	return err;
 }

+static int parse_sos_bin_descriptor(struct psp_context *psp,
+				    const struct psp_fw_bin_desc *desc,
+				    const struct psp_firmware_header_v2_0 *sos_hdr)
+{
+	uint8_t *ucode_start_addr = NULL;
+
+	if (!psp || !desc || !sos_hdr)
+		return -EINVAL;
+
+	ucode_start_addr = (uint8_t *)sos_hdr +
+		le32_to_cpu(desc->offset_bytes) +
+		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+
+	switch (desc->fw_type) {
+	case PSP_FW_TYPE_PSP_SOS:
+		psp->sos.fw_version = le32_to_cpu(desc->fw_version);
+		psp->sos.feature_version = le32_to_cpu(desc->fw_version);
+		psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->sos.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_SYS_DRV:
+		psp->sys.fw_version = le32_to_cpu(desc->fw_version);
+		psp->sys.feature_version = le32_to_cpu(desc->fw_version);
+		psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->sys.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_KDB:
+		psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
+		psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
+		psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->kdb.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_TOC:
+		psp->toc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->toc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->toc.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_SPL:
+		psp->spl.fw_version = le32_to_cpu(desc->fw_version);
+		psp->spl.feature_version = le32_to_cpu(desc->fw_version);
+		psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->spl.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_RL:
+		psp->rl.fw_version = le32_to_cpu(desc->fw_version);
+		psp->rl.feature_version = le32_to_cpu(desc->fw_version);
+		psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->rl.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_SOC_DRV:
+		psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
+		psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
+		psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->soc_drv.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_INTF_DRV:
+		psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
+		psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
+		psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->intf_drv.start_addr = ucode_start_addr;
+		break;
+	case PSP_FW_TYPE_PSP_DBG_DRV:
+		psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
+		psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
+		psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->dbg_drv.start_addr = ucode_start_addr;
+		break;
+	default:
+		dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
+		break;
+	}
+
+	return 0;
+}

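The nine case arms above differ only in which psp_bin_desc they fill in. A table-style lookup could collapse them; a hedged alternative sketch (hypothetical helper, not what the patch does):

/* Hypothetical alternative: map fw_type to the destination descriptor
 * once, then do the four assignments in one place. Sketch only.
 */
static struct psp_bin_desc *psp_desc_for_type(struct psp_context *psp,
					      uint32_t fw_type)
{
	switch (fw_type) {
	case PSP_FW_TYPE_PSP_SOS:      return &psp->sos;
	case PSP_FW_TYPE_PSP_SYS_DRV:  return &psp->sys;
	case PSP_FW_TYPE_PSP_KDB:      return &psp->kdb;
	case PSP_FW_TYPE_PSP_TOC:      return &psp->toc;
	case PSP_FW_TYPE_PSP_SPL:      return &psp->spl;
	case PSP_FW_TYPE_PSP_RL:       return &psp->rl;
	case PSP_FW_TYPE_PSP_SOC_DRV:  return &psp->soc_drv;
	case PSP_FW_TYPE_PSP_INTF_DRV: return &psp->intf_drv;
	case PSP_FW_TYPE_PSP_DBG_DRV:  return &psp->dbg_drv;
	default:                       return NULL;
	}
}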
 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
 {
	const struct psp_firmware_header_v1_0 *sos_hdr;

@@ -2961,32 +3138,32 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
		le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);

	if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) {
-		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
-		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
+		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);

-		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos.offset_bytes);
-		adev->psp.sys_start_addr = ucode_array_start_addr;
+		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
+		adev->psp.sys.start_addr = ucode_array_start_addr;

-		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos.size_bytes);
-		adev->psp.sos_start_addr = ucode_array_start_addr +
+		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
+		adev->psp.sos.start_addr = ucode_array_start_addr +
				le32_to_cpu(sos_hdr->sos.offset_bytes);
	} else {
		/* Load alternate PSP SOS FW */
		sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;

-		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
-		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+		adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+		adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);

-		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
-		adev->psp.sys_start_addr = ucode_array_start_addr +
+		adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
+		adev->psp.sys.start_addr = ucode_array_start_addr +
			le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);

-		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
-		adev->psp.sos_start_addr = ucode_array_start_addr +
+		adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
+		adev->psp.sos.start_addr = ucode_array_start_addr +
			le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
	}

-	if ((adev->psp.sys_bin_size == 0) || (adev->psp.sos_bin_size == 0)) {
+	if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
		dev_warn(adev->dev, "PSP SOS FW not available");
		return -EINVAL;
	}

@@ -3003,8 +3180,10 @@ int psp_init_sos_microcode(struct psp_context *psp,
	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
+	const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
	int err = 0;
	uint8_t *ucode_array_start_addr;
+	int fw_index = 0;

	if (!chip_name) {
		dev_err(adev->dev, "invalid chip name for sos microcode\n");

@@ -3033,35 +3212,52 @@ int psp_init_sos_microcode(struct psp_context *psp,

		if (sos_hdr->header.header_version_minor == 1) {
			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
-			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
-			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
+			adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
					le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
-			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
+			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
					le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
		}
		if (sos_hdr->header.header_version_minor == 2) {
			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
-			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
+			adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
					le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
		}
		if (sos_hdr->header.header_version_minor == 3) {
			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
-			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
-			adev->psp.toc_start_addr = ucode_array_start_addr +
+			adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
+			adev->psp.toc.start_addr = ucode_array_start_addr +
				le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
-			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
-			adev->psp.kdb_start_addr = ucode_array_start_addr +
+			adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
+			adev->psp.kdb.start_addr = ucode_array_start_addr +
				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
-			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
-			adev->psp.spl_start_addr = ucode_array_start_addr +
+			adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
+			adev->psp.spl.start_addr = ucode_array_start_addr +
				le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
-			adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
-			adev->psp.rl_start_addr = ucode_array_start_addr +
+			adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
+			adev->psp.rl.start_addr = ucode_array_start_addr +
				le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
		}
		break;
+	case 2:
+		sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
+
+		if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
+			dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
+			err = -EINVAL;
+			goto out;
+		}
+
+		for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
+			err = parse_sos_bin_descriptor(psp,
+						       &sos_hdr_v2_0->psp_fw_bin[fw_index],
+						       sos_hdr_v2_0);
+			if (err)
+				goto out;
+		}
+		break;
	default:
		dev_err(adev->dev,
			"unsupported psp sos firmware\n");

@@ -3080,7 +3276,7 @@ out:
 }

 static int parse_ta_bin_descriptor(struct psp_context *psp,
-				   const struct ta_fw_bin_desc *desc,
+				   const struct psp_fw_bin_desc *desc,
				   const struct ta_firmware_header_v2_0 *ta_hdr)
 {
	uint8_t *ucode_start_addr = NULL;

@@ -3168,7 +3364,7 @@ int psp_init_ta_microcode(struct psp_context *psp,
		goto out;
	}

-	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
+	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		err = -EINVAL;
		goto out;

@@ -3235,11 +3431,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
 {
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
-	void *cpu_addr;
-	dma_addr_t dma_addr;
	int ret, idx;
	char fw_name[100];
	const struct firmware *usbc_pd_fw;
+	struct amdgpu_bo *fw_buf_bo = NULL;
+	uint64_t fw_pri_mc_addr;
+	void *fw_pri_cpu_addr;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");

@@ -3254,31 +3451,24 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
	if (ret)
		goto fail;

-	/* We need contiguous physical mem to place the FW for psp to access */
-	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
-
-	ret = dma_mapping_error(adev->dev, dma_addr);
+	/* LFB address which is aligned to 1MB boundary per PSP request */
+	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &fw_buf_bo,
+				      &fw_pri_mc_addr,
+				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

-	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
-
-	/*
-	 * x86 specific workaround.
-	 * Without it the buffer is invisible in PSP.
-	 *
-	 * TODO Remove once PSP starts snooping CPU cache
-	 */
-#ifdef CONFIG_X86
-	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
-#endif
+	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	mutex_lock(&adev->psp.mutex);
-	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

+	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
+
 rel_buf:
-	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
	release_firmware(usbc_pd_fw);
 fail:
	if (ret) {

@@ -3307,7 +3497,10 @@ static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);

+int is_psp_fw_valid(struct psp_bin_desc bin)
+{
+	return bin.size_bytes;
+}

 const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",

@@ -3369,6 +3562,14 @@ const struct amdgpu_ip_block_version psp_v11_0_ip_block =
	.funcs = &psp_ip_funcs,
 };

+const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_PSP,
+	.major = 11,
+	.minor = 0,
+	.rev = 8,
+	.funcs = &psp_ip_funcs,
+};
+
 const struct amdgpu_ip_block_version psp_v12_0_ip_block =
 {
	.type = AMD_IP_BLOCK_TYPE_PSP,

@@ -48,11 +48,15 @@
 struct psp_context;
 struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
+struct psp_bin_desc;

 enum psp_bootloader_cmd {
	PSP_BL__LOAD_SYSDRV = 0x10000,
	PSP_BL__LOAD_SOSDRV = 0x20000,
	PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+	PSP_BL__LOAD_SOCDRV = 0x90000,
+	PSP_BL__LOAD_INTFDRV = 0xA0000,
+	PSP_BL__LOAD_DBGDRV = 0xB0000,
	PSP_BL__DRAM_LONG_TRAIN = 0x100000,
	PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
	PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,

@@ -93,6 +97,9 @@ struct psp_funcs
	int (*bootloader_load_kdb)(struct psp_context *psp);
	int (*bootloader_load_spl)(struct psp_context *psp);
	int (*bootloader_load_sysdrv)(struct psp_context *psp);
+	int (*bootloader_load_soc_drv)(struct psp_context *psp);
+	int (*bootloader_load_intf_drv)(struct psp_context *psp);
+	int (*bootloader_load_dbg_drv)(struct psp_context *psp);
	int (*bootloader_load_sos)(struct psp_context *psp);
	int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
	int (*ring_create)(struct psp_context *psp,

@@ -106,7 +113,7 @@ struct psp_funcs
	int (*mem_training)(struct psp_context *psp, uint32_t ops);
	uint32_t (*ring_get_wptr)(struct psp_context *psp);
	void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
-	int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+	int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
	int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
 };

@@ -116,6 +123,7 @@ struct psp_xgmi_node_info {
	uint8_t num_hops;
	uint8_t is_sharing_enabled;
	enum ta_xgmi_assigned_sdma_engine sdma_engine;
+	uint8_t num_links;
 };

 struct psp_xgmi_topology_info {

@@ -282,6 +290,13 @@ struct psp_runtime_boot_cfg_entry {
	uint32_t reserved;
 };

+struct psp_bin_desc {
+	uint32_t fw_version;
+	uint32_t feature_version;
+	uint32_t size_bytes;
+	uint8_t *start_addr;
+};
+
 struct psp_context
 {
	struct amdgpu_device *adev;

@@ -297,20 +312,15 @@ struct psp_context

	/* sos firmware */
	const struct firmware *sos_fw;
-	uint32_t sos_fw_version;
-	uint32_t sos_feature_version;
-	uint32_t sys_bin_size;
-	uint32_t sos_bin_size;
-	uint32_t toc_bin_size;
-	uint32_t kdb_bin_size;
-	uint32_t spl_bin_size;
-	uint32_t rl_bin_size;
-	uint8_t *sys_start_addr;
-	uint8_t *sos_start_addr;
-	uint8_t *toc_start_addr;
-	uint8_t *kdb_start_addr;
-	uint8_t *spl_start_addr;
-	uint8_t *rl_start_addr;
+	struct psp_bin_desc sys;
+	struct psp_bin_desc sos;
+	struct psp_bin_desc toc;
+	struct psp_bin_desc kdb;
+	struct psp_bin_desc spl;
+	struct psp_bin_desc rl;
+	struct psp_bin_desc soc_drv;
+	struct psp_bin_desc intf_drv;
+	struct psp_bin_desc dbg_drv;

	/* tmr buffer */
	struct amdgpu_bo *tmr_bo;

@@ -325,8 +335,6 @@ struct psp_context

	/* toc firmware */
	const struct firmware *toc_fw;
-	uint32_t toc_fw_version;
-	uint32_t toc_feature_version;

	/* fence buffer */
	struct amdgpu_bo *fence_buf_bo;

@@ -402,6 +410,12 @@ struct amdgpu_psp_funcs {
		((psp)->funcs->bootloader_load_spl ? (psp)->funcs->bootloader_load_spl((psp)) : 0)
 #define psp_bootloader_load_sysdrv(psp) \
		((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
+#define psp_bootloader_load_soc_drv(psp) \
+		((psp)->funcs->bootloader_load_soc_drv ? (psp)->funcs->bootloader_load_soc_drv((psp)) : 0)
+#define psp_bootloader_load_intf_drv(psp) \
+		((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
+#define psp_bootloader_load_dbg_drv(psp) \
+		((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
 #define psp_bootloader_load_sos(psp) \
		((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
 #define psp_smu_reload_quirk(psp) \

@@ -414,9 +428,9 @@ struct amdgpu_psp_funcs {
 #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
 #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))

-#define psp_load_usbc_pd_fw(psp, dma_addr) \
+#define psp_load_usbc_pd_fw(psp, fw_pri_mc_addr) \
	((psp)->funcs->load_usbc_pd_fw ? \
-	(psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+	(psp)->funcs->load_usbc_pd_fw((psp), (fw_pri_mc_addr)) : -EINVAL)

 #define psp_read_usbc_pd_fw(psp, fw_ver) \
	((psp)->funcs->read_usbc_pd_fw ? \

@@ -427,6 +441,7 @@ extern const struct amd_ip_funcs psp_ip_funcs;
 extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
 extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
 extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;

@@ -483,4 +498,5 @@ int psp_load_fw_list(struct psp_context *psp,
		     struct amdgpu_firmware_info **ucode_list, int ucode_count);
 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);

+int is_psp_fw_valid(struct psp_bin_desc bin);
 #endif

@@ -71,8 +71,8 @@ const char *ras_block_string[] = {
 /* inject address is 52 bits */
 #define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

-/* typical ECC bad page rate(1 bad page per 100MB VRAM) */
-#define RAS_BAD_PAGE_RATE		(100 * 1024 * 1024ULL)
+/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
+#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)

 enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,

@@ -355,8 +355,9 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
  * to see which blocks support RAS on a particular asic.
  *
  */
-static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
-		size_t size, loff_t *pos)
+static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
+					     const char __user *buf,
+					     size_t size, loff_t *pos)
 {
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;

@@ -370,7 +371,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
-		return -EINVAL;
+		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);

@@ -403,9 +404,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
-			dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
-					"as bad before error injection!\n",
-					data.inject.address);
+			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
+				 "already been marked as bad!\n",
+				 data.inject.address);
			break;
		}

@@ -439,21 +440,24 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
  * will reset EEPROM table to 0 entries.
  *
  */
-static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
-		size_t size, loff_t *pos)
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
+					       const char __user *buf,
+					       size_t size, loff_t *pos)
 {
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
-			&(amdgpu_ras_get_context(adev)->eeprom_control));
+		&(amdgpu_ras_get_context(adev)->eeprom_control));

-	if (ret == 1) {
+	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
-		return -EIO;
+		return ret;
	}
 }

@@ -1316,6 +1320,12 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
			   &con->bad_page_cnt_threshold);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
+	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
+			    &amdgpu_ras_debugfs_eeprom_size_ops);
+	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
+						       S_IRUGO, dir, adev,
+						       &amdgpu_ras_debugfs_eeprom_table_ops);
+	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will

@@ -1833,13 +1843,12 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)

	control = &con->eeprom_control;
	data = con->eh_data;
-	save_count = data->count - control->num_recs;
+	save_count = data->count - control->ras_num_recs;
	/* only new entries are saved */
	if (save_count > 0) {
-		if (amdgpu_ras_eeprom_process_recods(control,
-						&data->bps[control->num_recs],
-						true,
-						save_count)) {
+		if (amdgpu_ras_eeprom_append(control,
+					     &data->bps[control->ras_num_recs],
+					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

@@ -1857,28 +1866,24 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 {
	struct amdgpu_ras_eeprom_control *control =
-			&adev->psp.ras.ras->eeprom_control;
-	struct eeprom_table_record *bps = NULL;
-	int ret = 0;
+		&adev->psp.ras.ras->eeprom_control;
+	struct eeprom_table_record *bps;
+	int ret;

	/* no bad page record, skip eeprom access */
-	if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
-		return ret;
+	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
+		return 0;

-	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

-	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
-		control->num_recs)) {
+	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
+	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!");
-		ret = -EIO;
-		goto out;
-	}
+	else
+		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);

-	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
-
-out:
	kfree(bps);
	return ret;
 }

@@ -1918,11 +1923,9 @@ static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
 }

 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
-					uint32_t max_length)
+					  uint32_t max_count)
 {
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-	int tmp_threshold = amdgpu_bad_page_threshold;
-	u64 val;

	/*
	 * Justification of value bad_page_cnt_threshold in ras structure

@@ -1943,18 +1946,15 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
	 * take no effect.
	 */

-	if (tmp_threshold < -1)
-		tmp_threshold = -1;
-	else if (tmp_threshold > max_length)
-		tmp_threshold = max_length;
+	if (amdgpu_bad_page_threshold < 0) {
+		u64 val = adev->gmc.mc_vram_size;

-	if (tmp_threshold == -1) {
-		val = adev->gmc.mc_vram_size;
-		do_div(val, RAS_BAD_PAGE_RATE);
+		do_div(val, RAS_BAD_PAGE_COVER);
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
-						max_length);
+						  max_count);
	} else {
-		con->bad_page_cnt_threshold = tmp_threshold;
+		con->bad_page_cnt_threshold = min_t(int, max_count,
+						    amdgpu_bad_page_threshold);
	}
 }

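As a concrete check of the default branch above (illustrative numbers, not from the patch): with 16 GiB of VRAM the computed threshold is mc_vram_size / RAS_BAD_PAGE_COVER = 17179869184 / 104857600 = 163 retired pages, further clamped to max_count.

/* Illustrative arithmetic for the default threshold; do_div() is the
 * kernel's 64-by-32 division helper and modifies val in place.
 */
u64 val = 16ULL << 30;		/* assume 16 GiB of VRAM */
do_div(val, 100 << 20);		/* RAS_BAD_PAGE_COVER = 100 MiB */
/* val is now 163; threshold = min(163, max_count) */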
@@ -1962,15 +1962,24 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 {
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
-	uint32_t max_eeprom_records_len = 0;
+	u32 max_eeprom_records_count = 0;
	bool exc_err_limit = false;
	int ret;

-	if (adev->ras_enabled && con)
-		data = &con->eh_data;
-	else
+	if (!con)
		return 0;

+	/* Allow access to RAS EEPROM via debugfs, when the ASIC
+	 * supports RAS and debugfs is enabled, but when
+	 * adev->ras_enabled is unset, i.e. when "ras_enable"
+	 * module parameter is set to 0.
+	 */
+	con->adev = adev;
+
+	if (!adev->ras_enabled)
+		return 0;
+
+	data = &con->eh_data;
	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
	if (!*data) {
		ret = -ENOMEM;

@@ -1980,10 +1989,9 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
-	con->adev = adev;

-	max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
-	amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
+	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);

	/* Todo: During test the SMU might fail to read the eeprom through I2C
	 * when the GPU is pending on XGMI reset during probe time

@@ -1999,13 +2007,13 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
	if (exc_err_limit || ret)
		goto free;

-	if (con->eeprom_control.num_recs) {
+	if (con->eeprom_control.ras_num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;

		if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
-			adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
+			adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
	}

	return 0;

@@ -2015,7 +2023,7 @@ free:
	kfree(*data);
	con->eh_data = NULL;
 out:
-	dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
+	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);

	/*
	 * Except error threshold exceeding case, other failure cases in this

@@ -318,6 +318,7 @@ struct amdgpu_ras {
	/* sysfs */
	struct device_attribute features_attr;
	struct bin_attribute badpages_attr;
+	struct dentry *de_ras_eeprom_table;
	/* block array */
	struct ras_manager *objs;

[File diff suppressed because it is too large]

@@ -28,10 +28,11 @@

 struct amdgpu_device;

-enum amdgpu_ras_eeprom_err_type{
-	AMDGPU_RAS_EEPROM_ERR_PLACE_HOLDER,
+enum amdgpu_ras_eeprom_err_type {
+	AMDGPU_RAS_EEPROM_ERR_NA,
	AMDGPU_RAS_EEPROM_ERR_RECOVERABLE,
-	AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE
+	AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE,
+	AMDGPU_RAS_EEPROM_ERR_COUNT,
 };

 struct amdgpu_ras_eeprom_table_header {

@@ -40,15 +41,45 @@ struct amdgpu_ras_eeprom_table_header {
	uint32_t first_rec_offset;
	uint32_t tbl_size;
	uint32_t checksum;
-}__attribute__((__packed__));
+} __packed;

 struct amdgpu_ras_eeprom_control {
	struct amdgpu_ras_eeprom_table_header tbl_hdr;
-	uint32_t next_addr;
-	unsigned int num_recs;
-	struct mutex tbl_mutex;
-	uint32_t tbl_byte_sum;
-	uint16_t i2c_address; // 8-bit represented address

+	/* Base I2C EEPROM 19-bit memory address,
+	 * where the table is located. For more information,
+	 * see top of amdgpu_eeprom.c.
+	 */
+	u32 i2c_address;
+
+	/* The byte offset off of @i2c_address
+	 * where the table header is found,
+	 * and where the records start--always
+	 * right after the header.
+	 */
+	u32 ras_header_offset;
+	u32 ras_record_offset;
+
+	/* Number of records in the table.
+	 */
+	u32 ras_num_recs;
+
+	/* First record index to read, 0-based.
+	 * Range is [0, num_recs-1]. This is
+	 * an absolute index, starting right after
+	 * the table header.
+	 */
+	u32 ras_fri;
+
+	/* Maximum possible number of records
+	 * we could store, i.e. the maximum capacity
+	 * of the table.
+	 */
+	u32 ras_max_record_count;
+
+	/* Protect table access via this mutex.
+	 */
+	struct mutex ras_tbl_mutex;
 };

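Given the fields above, the byte location of a logical record follows from ras_fri, ras_record_offset and a fixed per-record size. A hedged sketch, assuming a RAS_TABLE_RECORD_SIZE constant like the one used by amdgpu_ras_eeprom.c, and assuming the record area wraps as a ring of ras_max_record_count slots:

/* Sketch: EEPROM byte offset of logical record i (0 = oldest).
 * RAS_TABLE_RECORD_SIZE is assumed to be the on-EEPROM record size.
 */
static u32 ras_record_addr(const struct amdgpu_ras_eeprom_control *c, u32 i)
{
	u32 slot = (c->ras_fri + i) % c->ras_max_record_count;

	return c->ras_record_offset + slot * RAS_TABLE_RECORD_SIZE;
}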
 /*

@@ -74,21 +105,26 @@ struct eeprom_table_record {

	unsigned char mem_channel;
	unsigned char mcumc_id;
-}__attribute__((__packed__));
+} __packed;

 int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
-			bool *exceed_err_limit);
+			   bool *exceed_err_limit);

 int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);

 bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);

-int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
-					    struct eeprom_table_record *records,
-					    bool write,
-					    int num);
+int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+			   struct eeprom_table_record *records, const u32 num);

-inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void);
+int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
+			     struct eeprom_table_record *records, const u32 num);

-void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
+inline uint32_t amdgpu_ras_eeprom_max_record_count(void);

+void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+
+extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
+extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;

 #endif // _AMDGPU_RAS_EEPROM_H

@@ -107,8 +107,6 @@ struct amdgpu_fence_driver {
 };

 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,

@@ -117,8 +115,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,

@@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs {
	void (*reset)(struct amdgpu_device *adev);
	void (*start)(struct amdgpu_device *adev);
	void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-	void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
-	u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
+	void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
+	u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
	bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };

@@ -1396,6 +1396,41 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
	return ttm_bo_eviction_valuable(bo, place);
 }

+static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
+				      void *buf, size_t size, bool write)
+{
+	while (size) {
+		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
+		uint64_t bytes = 4 - (pos & 0x3);
+		uint32_t shift = (pos & 0x3) * 8;
+		uint32_t mask = 0xffffffff << shift;
+		uint32_t value = 0;
+
+		if (size < bytes) {
+			mask &= 0xffffffff >> (bytes - size) * 8;
+			bytes = size;
+		}
+
+		if (mask != 0xffffffff) {
+			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
+			if (write) {
+				value &= ~mask;
+				value |= (*(uint32_t *)buf << shift) & mask;
+				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
+			} else {
+				value = (value & mask) >> shift;
+				memcpy(buf, &value, bytes);
+			}
+		} else {
+			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
+		}
+
+		pos += bytes;
+		buf += bytes;
+		size -= bytes;
+	}
+}

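The mask arithmetic above is easiest to follow with one worked case (numbers are illustrative only):

/* Worked example: read size = 2 bytes at pos = 5.
 *   aligned_pos = ALIGN_DOWN(5, 4)        = 4
 *   bytes       = 4 - (5 & 3)             = 3
 *   shift       = (5 & 3) * 8             = 8
 *   mask        = 0xffffffff << 8         = 0xffffff00
 * size (2) < bytes (3), so:
 *   mask       &= 0xffffffff >> (3 - 2)*8   -> 0x00ffff00
 *   bytes       = 2
 * One dword is read at offset 4; (value & mask) >> 8 yields the two
 * requested bytes, which are then memcpy'd into buf.
 */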
 /**
  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *

@@ -1415,8 +1450,6 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
-	unsigned long flags;
-	uint32_t value = 0;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)

@@ -1424,41 +1457,21 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
-		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
-		uint64_t bytes = 4 - (cursor.start & 3);
-		uint32_t shift = (cursor.start & 3) * 8;
-		uint32_t mask = 0xffffffff << shift;
+		size_t count, size = cursor.size;
+		loff_t pos = cursor.start;

-		if (cursor.size < bytes) {
-			mask &= 0xffffffff >> (bytes - cursor.size) * 8;
-			bytes = cursor.size;
+		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+		size -= count;
+		if (size) {
+			/* using MM to access rest vram and handle un-aligned address */
+			pos += count;
+			buf += count;
+			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

-		if (mask != 0xffffffff) {
-			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-			value = RREG32_NO_KIQ(mmMM_DATA);
-			if (write) {
-				value &= ~mask;
-				value |= (*(uint32_t *)buf << shift) & mask;
-				WREG32_NO_KIQ(mmMM_DATA, value);
-			}
-			spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-			if (!write) {
-				value = (value & mask) >> shift;
-				memcpy(buf, &value, bytes);
-			}
-		} else {
-			bytes = cursor.size & ~0x3ULL;
-			amdgpu_device_vram_access(adev, cursor.start,
-						  (uint32_t *)buf, bytes,
-						  write);
-		}
-
-		ret += bytes;
-		buf = (uint8_t *)buf + bytes;
-		amdgpu_res_next(&cursor, bytes);
+		ret += cursor.size;
+		buf += cursor.size;
+		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
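The rewritten loop body reduces each VRAM segment to a fixed two-step recipe: serve as much as possible through the CPU-visible BAR aperture, then let the indirect MMIO path handle the remainder, including any unaligned head or tail. Condensed, with hypothetical helper names standing in for the two functions:

/* Sketch only: fast_path()/slow_path() stand in for
 * amdgpu_device_aper_access() and amdgpu_ttm_vram_mm_access().
 */
size_t done = fast_path(adev, pos, buf, size, write);	/* returns 0..size */
if (done < size)
	slow_path(adev, pos + done, buf + done, size - done, write);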
@@ -2148,7 +2161,6 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
		return -ENXIO;

	while (size) {
-		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)

@@ -2158,11 +2170,7 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
		if (r)
			return r;

-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
-		WREG32_NO_KIQ(mmMM_DATA, value);
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+		amdgpu_device_mm_access(adev, *pos, &value, 4, true);

		result += 4;
		buf += 4;

@@ -409,6 +409,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
+	case CHIP_CYAN_SKILLFISH:
+		if (!(load_type &&
+		      adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2))
+			return AMDGPU_FW_LOAD_DIRECT;
+		else
+			return AMDGPU_FW_LOAD_PSP;
	default:
		DRM_ERROR("Unknown firmware load type\n");
	}

@@ -416,6 +422,84 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
	return AMDGPU_FW_LOAD_DIRECT;
 }

+const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
+{
+	switch (ucode_id) {
+	case AMDGPU_UCODE_ID_SDMA0:
+		return "SDMA0";
+	case AMDGPU_UCODE_ID_SDMA1:
+		return "SDMA1";
+	case AMDGPU_UCODE_ID_SDMA2:
+		return "SDMA2";
+	case AMDGPU_UCODE_ID_SDMA3:
+		return "SDMA3";
+	case AMDGPU_UCODE_ID_SDMA4:
+		return "SDMA4";
+	case AMDGPU_UCODE_ID_SDMA5:
+		return "SDMA5";
+	case AMDGPU_UCODE_ID_SDMA6:
+		return "SDMA6";
+	case AMDGPU_UCODE_ID_SDMA7:
+		return "SDMA7";
+	case AMDGPU_UCODE_ID_CP_CE:
+		return "CP_CE";
+	case AMDGPU_UCODE_ID_CP_PFP:
+		return "CP_PFP";
+	case AMDGPU_UCODE_ID_CP_ME:
+		return "CP_ME";
+	case AMDGPU_UCODE_ID_CP_MEC1:
+		return "CP_MEC1";
+	case AMDGPU_UCODE_ID_CP_MEC1_JT:
+		return "CP_MEC1_JT";
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		return "CP_MEC2";
+	case AMDGPU_UCODE_ID_CP_MEC2_JT:
+		return "CP_MEC2_JT";
+	case AMDGPU_UCODE_ID_CP_MES:
+		return "CP_MES";
+	case AMDGPU_UCODE_ID_CP_MES_DATA:
+		return "CP_MES_DATA";
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+		return "RLC_RESTORE_LIST_CNTL";
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+		return "RLC_RESTORE_LIST_GPM_MEM";
+	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+		return "RLC_RESTORE_LIST_SRM_MEM";
+	case AMDGPU_UCODE_ID_RLC_IRAM:
+		return "RLC_IRAM";
+	case AMDGPU_UCODE_ID_RLC_DRAM:
+		return "RLC_DRAM";
+	case AMDGPU_UCODE_ID_RLC_G:
+		return "RLC_G";
+	case AMDGPU_UCODE_ID_STORAGE:
+		return "STORAGE";
+	case AMDGPU_UCODE_ID_SMC:
+		return "SMC";
+	case AMDGPU_UCODE_ID_UVD:
+		return "UVD";
+	case AMDGPU_UCODE_ID_UVD1:
+		return "UVD1";
+	case AMDGPU_UCODE_ID_VCE:
+		return "VCE";
+	case AMDGPU_UCODE_ID_VCN:
+		return "VCN";
+	case AMDGPU_UCODE_ID_VCN1:
+		return "VCN1";
+	case AMDGPU_UCODE_ID_DMCU_ERAM:
+		return "DMCU_ERAM";
+	case AMDGPU_UCODE_ID_DMCU_INTV:
+		return "DMCU_INTV";
+	case AMDGPU_UCODE_ID_VCN0_RAM:
+		return "VCN0_RAM";
+	case AMDGPU_UCODE_ID_VCN1_RAM:
+		return "VCN1_RAM";
+	case AMDGPU_UCODE_ID_DMCUB:
+		return "DMCUB";
+	default:
+		return "UNKNOWN UCODE";
+	}
+}

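A typical consumer of amdgpu_ucode_name() is a diagnostic print while iterating firmware entries; a hedged one-line example (the print itself is illustrative, not from the patch):

/* Illustrative: label the firmware being handled in a debug message,
 * where ucode is a struct amdgpu_firmware_info pointer.
 */
DRM_DEBUG("handling ucode %s\n", amdgpu_ucode_name(ucode->ucode_id));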
 #define FW_VERSION_ATTR(name, mode, field) \
 static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, \

@@ -440,7 +524,7 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
 FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
 FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
-FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
+FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
 FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
 FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
 FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);

@@ -71,7 +71,7 @@ struct smc_firmware_header_v2_1 {
	uint32_t pptable_entry_offset;
 };

-struct psp_fw_bin_desc {
+struct psp_fw_legacy_bin_desc {
	uint32_t fw_version;
	uint32_t offset_bytes;
	uint32_t size_bytes;

@@ -80,30 +80,57 @@ struct psp_fw_bin_desc {
 /* version_major=1, version_minor=0 */
 struct psp_firmware_header_v1_0 {
	struct common_firmware_header header;
-	struct psp_fw_bin_desc sos;
+	struct psp_fw_legacy_bin_desc sos;
 };

 /* version_major=1, version_minor=1 */
 struct psp_firmware_header_v1_1 {
	struct psp_firmware_header_v1_0 v1_0;
-	struct psp_fw_bin_desc toc;
-	struct psp_fw_bin_desc kdb;
+	struct psp_fw_legacy_bin_desc toc;
+	struct psp_fw_legacy_bin_desc kdb;
 };

 /* version_major=1, version_minor=2 */
 struct psp_firmware_header_v1_2 {
	struct psp_firmware_header_v1_0 v1_0;
-	struct psp_fw_bin_desc res;
-	struct psp_fw_bin_desc kdb;
+	struct psp_fw_legacy_bin_desc res;
+	struct psp_fw_legacy_bin_desc kdb;
 };

 /* version_major=1, version_minor=3 */
 struct psp_firmware_header_v1_3 {
	struct psp_firmware_header_v1_1 v1_1;
-	struct psp_fw_bin_desc spl;
-	struct psp_fw_bin_desc rl;
-	struct psp_fw_bin_desc sys_drv_aux;
-	struct psp_fw_bin_desc sos_aux;
+	struct psp_fw_legacy_bin_desc spl;
+	struct psp_fw_legacy_bin_desc rl;
+	struct psp_fw_legacy_bin_desc sys_drv_aux;
+	struct psp_fw_legacy_bin_desc sos_aux;
 };

+struct psp_fw_bin_desc {
+	uint32_t fw_type;
+	uint32_t fw_version;
+	uint32_t offset_bytes;
+	uint32_t size_bytes;
+};
+
+enum psp_fw_type {
+	PSP_FW_TYPE_UNKOWN,
+	PSP_FW_TYPE_PSP_SOS,
+	PSP_FW_TYPE_PSP_SYS_DRV,
+	PSP_FW_TYPE_PSP_KDB,
+	PSP_FW_TYPE_PSP_TOC,
+	PSP_FW_TYPE_PSP_SPL,
+	PSP_FW_TYPE_PSP_RL,
+	PSP_FW_TYPE_PSP_SOC_DRV,
+	PSP_FW_TYPE_PSP_INTF_DRV,
+	PSP_FW_TYPE_PSP_DBG_DRV,
+};
+
+/* version_major=2, version_minor=0 */
+struct psp_firmware_header_v2_0 {
+	struct common_firmware_header header;
+	uint32_t psp_fw_bin_count;
+	struct psp_fw_bin_desc psp_fw_bin[];
+};

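Because psp_fw_bin[] is a C99 flexible array member, a v2.0 header is walked by psp_fw_bin_count rather than by fixed member offsets. A minimal sketch of that walk, mirroring the loop added to psp_init_sos_microcode() earlier in this diff (fw is an assumed, already-requested struct firmware):

/* Sketch: iterate the packed PSP binaries of a v2.0 header. */
const struct psp_firmware_header_v2_0 *hdr =
	(const struct psp_firmware_header_v2_0 *)fw->data;
uint32_t i, n = le32_to_cpu(hdr->psp_fw_bin_count);

for (i = 0; i < n && i < UCODE_MAX_PSP_PACKAGING; i++) {
	const struct psp_fw_bin_desc *d = &hdr->psp_fw_bin[i];
	/* d->fw_type selects the destination psp_bin_desc,
	 * as in parse_sos_bin_descriptor(). */
}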
 /* version_major=1, version_minor=0 */

@@ -138,18 +165,11 @@ enum ta_fw_type {
	TA_FW_TYPE_MAX_INDEX,
 };

-struct ta_fw_bin_desc {
-	uint32_t fw_type;
-	uint32_t fw_version;
-	uint32_t offset_bytes;
-	uint32_t size_bytes;
-};
-
 /* version_major=2, version_minor=0 */
 struct ta_firmware_header_v2_0 {
	struct common_firmware_header header;
	uint32_t ta_fw_bin_count;
-	struct ta_fw_bin_desc ta_fw_bin[];
+	struct psp_fw_bin_desc ta_fw_bin[];
 };

 /* version_major=1, version_minor=0 */

@@ -312,6 +332,7 @@ union amdgpu_firmware_header {
	struct psp_firmware_header_v1_0 psp;
	struct psp_firmware_header_v1_1 psp_v1_1;
	struct psp_firmware_header_v1_3 psp_v1_3;
+	struct psp_firmware_header_v2_0 psp_v2_0;
	struct ta_firmware_header_v1_0 ta;
	struct ta_firmware_header_v2_0 ta_v2_0;
	struct gfx_firmware_header_v1_0 gfx;

@@ -326,7 +347,7 @@ union amdgpu_firmware_header {
	uint8_t raw[0x100];
 };

-#define UCODE_MAX_TA_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct ta_fw_bin_desc))
+#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc))

 /*
  * fw loading support

@@ -449,4 +470,6 @@ void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev);
 enum amdgpu_firmware_load_type
 amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);

+const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
+
 #endif

@@ -134,7 +134,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
			amdgpu_ras_save_bad_pages(adev);

			if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
-				adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
+				adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
		}

		amdgpu_ras_reset_gpu(adev);

@@ -531,7 +531,7 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
-	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos_fw_version);
+	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);

@@ -88,6 +88,46 @@ struct amdgpu_prt_cb {
	struct dma_fence_cb cb;
 };

+/**
+ * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
+ * @pasid: the pasid the VM is using on this GPU
+ *
+ * Set the pasid this VM is using on this GPU, can also be used to remove the
+ * pasid by passing in zero.
+ *
+ */
+int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			u32 pasid)
+{
+	int r;
+
+	if (vm->pasid == pasid)
+		return 0;
+
+	if (vm->pasid) {
+		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
+		if (r < 0)
+			return r;
+
+		vm->pasid = 0;
+	}
+
+	if (pasid) {
+		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
+					GFP_KERNEL));
+		if (r < 0)
+			return r;
+
+		vm->pasid = pasid;
+	}
+
+	return 0;
+}

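With the helper above, every later call site in this diff becomes a one-liner; a hedged usage sketch (error handling abbreviated, the pasid value is illustrative):

/* Illustrative pairing of bind and unbind around a VM's lifetime. */
r = amdgpu_vm_set_pasid(adev, vm, pasid);	/* install the mapping */
if (r)
	return r;
/* ... use the VM; faults can now resolve pasid -> vm via xa_load() ... */
amdgpu_vm_set_pasid(adev, vm, 0);		/* drop it on teardown */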
/*
|
||||
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
|
||||
* happens while holding this lock anywhere to prevent deadlocks when
|
||||
|
@ -2863,14 +2903,13 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
|
|||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: requested vm
|
||||
* @pasid: Process address space identifier
|
||||
*
|
||||
* Init @vm fields.
|
||||
*
|
||||
* Returns:
|
||||
* 0 for success, error for failure.
|
||||
*/
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
{
|
||||
struct amdgpu_bo *root_bo;
|
||||
struct amdgpu_bo_vm *root;
|
||||
|
@ -2944,19 +2983,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
|
|||
|
||||
amdgpu_bo_unreserve(vm->root.bo);
|
||||
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
||||
GFP_ATOMIC);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
if (r < 0)
|
||||
goto error_free_root;
|
||||
|
||||
vm->pasid = pasid;
|
||||
}
|
||||
|
||||
INIT_KFIFO(vm->faults);
|
||||
|
||||
return 0;
|
||||
|
@ -3012,7 +3038,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
|||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @vm: requested vm
|
||||
* @pasid: pasid to use
|
||||
*
|
||||
* This only works on GFX VMs that don't have any BOs added and no
|
||||
* page tables allocated yet.
|
||||
|
@ -3020,7 +3045,6 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
|||
* Changes the following VM parameters:
|
||||
* - use_cpu_for_update
|
||||
* - pte_supports_ats
|
||||
* - pasid (old PASID is released, because compute manages its own PASIDs)
|
||||
*
|
||||
* Reinitializes the page directory to reflect the changed ATS
|
||||
* setting.
|
||||
|
@ -3028,8 +3052,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
|
|||
* Returns:
|
||||
* 0 for success, -errno for errors.
|
||||
*/
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
u32 pasid)
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
{
|
||||
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
|
||||
int r;
|
||||
|
@ -3043,19 +3066,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
|||
if (r)
|
||||
goto unreserve_bo;
|
||||
|
||||
if (pasid) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
|
||||
r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
|
||||
GFP_ATOMIC);
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
|
||||
if (r == -ENOSPC)
|
||||
goto unreserve_bo;
|
||||
r = 0;
|
||||
}
|
||||
|
||||
/* Check if PD needs to be reinitialized and do it before
|
||||
* changing any other state, in case it fails.
|
||||
*/
|
||||
|
@@ -3065,7 +3075,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
to_amdgpu_bo_vm(vm->root.bo),
false);
if (r)
goto free_idr;
goto unreserve_bo;
}

/* Update VM state */
@@ -3082,7 +3092,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_sync_wait(vm->root.bo,
AMDGPU_FENCE_OWNER_UNDEFINED, true);
if (r)
goto free_idr;
goto unreserve_bo;

vm->update_funcs = &amdgpu_vm_cpu_funcs;
} else {
@@ -3092,36 +3102,11 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->last_update = NULL;
vm->is_compute_context = true;

if (vm->pasid) {
unsigned long flags;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

/* Free the original amdgpu allocated pasid
* Will be replaced with kfd allocated pasid
*/
amdgpu_pasid_free(vm->pasid);
vm->pasid = 0;
}

/* Free the shadow bo for compute VM */
amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);

if (pasid)
vm->pasid = pasid;

goto unreserve_bo;

free_idr:
if (pasid) {
unsigned long flags;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
unreserve_bo:
amdgpu_bo_unreserve(vm->root.bo);
return r;
@@ -3137,14 +3122,7 @@ unreserve_bo:
*/
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
if (vm->pasid) {
unsigned long flags;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
vm->pasid = 0;
amdgpu_vm_set_pasid(adev, vm, 0);
vm->is_compute_context = false;
}

@@ -3168,15 +3146,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
vm->pasid = 0;
}

amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);

@@ -3258,8 +3228,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.vm_update_mode = 0;
#endif

idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
}

/**
@@ -3271,8 +3240,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
idr_destroy(&adev->vm_manager.pasid_idr);
WARN_ON(!xa_empty(&adev->vm_manager.pasids));
xa_destroy(&adev->vm_manager.pasids);

amdgpu_vmid_mgr_fini(adev);
}
@@ -3341,13 +3310,13 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_vm *vm;
unsigned long flags;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
xa_lock_irqsave(&adev->vm_manager.pasids, flags);

vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
vm = xa_load(&adev->vm_manager.pasids, pasid);
if (vm)
*task_info = vm->task_info;

spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
}

/**
@@ -3389,15 +3358,15 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_vm *vm;
int r;

spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
vm = xa_load(&adev->vm_manager.pasids, pasid);
if (vm) {
root = amdgpu_bo_ref(vm->root.bo);
is_compute_context = vm->is_compute_context;
} else {
root = NULL;
}
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

if (!root)
return false;
@@ -3415,11 +3384,11 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
goto error_unref;

/* Double check that the VM still exists */
spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
vm = xa_load(&adev->vm_manager.pasids, pasid);
if (vm && vm->root.bo != root)
vm = NULL;
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags);
xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
if (!vm)
goto error_unlock;

@@ -359,8 +359,7 @@ struct amdgpu_vm_manager {
/* PASID to VM mapping, will be used in interrupt context to
* look up VM of a page fault
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
struct xarray pasids;
};

struct amdgpu_bo_va_mapping;
@@ -375,9 +374,12 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,

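/*
 * Context for the hunks above: the per-device idr + pasid_lock pair is
 * replaced by a single xarray created with XA_FLAGS_LOCK_IRQ, so the
 * PASID -> VM lookup can run under the xarray's own irq-safe lock in the
 * page-fault path. A minimal sketch of the new lookup pattern follows;
 * the pasid_table wrapper and its names are illustrative only (the driver
 * keeps the xarray inside struct amdgpu_vm_manager).
 */
#include <linux/types.h>
#include <linux/xarray.h>

struct pasid_table {
	struct xarray pasids;			/* pasid -> object */
};

static void pasid_table_init(struct pasid_table *t)
{
	/* XA_FLAGS_LOCK_IRQ makes xa_lock an irq-disabling spinlock */
	xa_init_flags(&t->pasids, XA_FLAGS_LOCK_IRQ);
}

static void *pasid_table_find(struct pasid_table *t, u32 pasid)
{
	unsigned long flags;
	void *obj;

	/* Same shape as the amdgpu_vm_handle_fault() lookup above */
	xa_lock_irqsave(&t->pasids, flags);
	obj = xa_load(&t->pasids, pasid);
	xa_unlock_irqrestore(&t->pasids, flags);

	return obj;
}
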
@@ -486,6 +486,18 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
return -EINVAL;
}

int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
int i;

for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_links;
return -EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;

@@ -59,6 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,

@@ -0,0 +1,51 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "nv.h"

#include "soc15_common.h"
#include "soc15_hw_ip.h"
#include "cyan_skillfish_ip_offset.h"

int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
{
/* HW has more IP blocks, only initialize the blocks needed by the driver */
uint32_t i;
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
}
return 0;
}

@@ -56,6 +56,10 @@
#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1
#define GFX10_MEC_HPD_SIZE 2048

#define RLCG_VFGATE_DISABLED 0x4000000
#define RLCG_WRONG_OPERATION_TYPE 0x2000000
#define RLCG_NOT_IN_RANGE 0x1000000

#define F32_CE_PROGRAM_RAM_SIZE 65536
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

@@ -181,6 +185,9 @@
#define GFX_RLCG_GC_READ (0x1 << 28)
#define GFX_RLCG_MMHUB_WRITE (0x2 << 28)

#define RLCG_ERROR_REPORT_ENABLED(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -249,6 +256,39 @@ MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin");

MODULE_FIRMWARE("amdgpu/cyan_skillfish_ce.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_pfp.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_me.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_mec2.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_rlc.bin");

MODULE_FIRMWARE("amdgpu/cyan_skillfish2_ce.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_pfp.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_me.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_0[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
/* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382),
/* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e),
/* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f),
/* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250),
/* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261),
/* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240),
/* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241),
};

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1486,6 +1526,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
uint32_t i = 0;
uint32_t retries = 50000;
u32 ret = 0;
u32 tmp;

scratch_reg0 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
@@ -1519,9 +1560,8 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
writel(v, scratch_reg0);
writel(offset | flag, scratch_reg1);
writel(1, spare_int);
for (i = 0; i < retries; i++) {
u32 tmp;

for (i = 0; i < retries; i++) {
tmp = readl(scratch_reg1);
if (!(tmp & flag))
break;
@@ -1529,8 +1569,19 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
udelay(10);
}

if (i >= retries)
pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
if (i >= retries) {
if (RLCG_ERROR_REPORT_ENABLED(adev)) {
if (tmp & RLCG_VFGATE_DISABLED)
pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
else if (tmp & RLCG_WRONG_OPERATION_TYPE)
pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
else if (tmp & RLCG_NOT_IN_RANGE)
pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
else
pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
} else
pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
}
}

ret = readl(scratch_reg0);
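
/*
 * For context, the RLCG indirect access above is a mailbox-style handshake:
 * the payload goes into SCRATCH_REG0, the register offset plus an operation
 * flag into SCRATCH_REG1, SPARE_INT rings the RLC, and the driver polls
 * until the RLC clears the flag; on timeout the new status bits are decoded
 * from the same scratch register. A schematic sketch of that poll-and-decode
 * loop, with assumed ioremapped MMIO pointers standing in for the driver's
 * scratch_reg0/scratch_reg1/spare_int pointers:
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/printk.h>

#define RLCG_VFGATE_DISABLED		0x4000000
#define RLCG_WRONG_OPERATION_TYPE	0x2000000
#define RLCG_NOT_IN_RANGE		0x1000000

static u32 rlcg_rw_sketch(void __iomem *scratch0, void __iomem *scratch1,
			  void __iomem *spare_int, u32 offset, u32 v, u32 flag)
{
	u32 tmp = 0;
	int i;

	writel(v, scratch0);		 /* payload */
	writel(offset | flag, scratch1); /* target register + op flag */
	writel(1, spare_int);		 /* kick the RLC */

	for (i = 0; i < 50000; i++) {
		tmp = readl(scratch1);
		if (!(tmp & flag))	 /* RLC clears the flag when done */
			return readl(scratch0);
		udelay(10);
	}

	/* On timeout the RLC leaves a reason code in the scratch register */
	if (tmp & RLCG_VFGATE_DISABLED)
		pr_err("vfgate disabled, reg 0x%05x\n", offset);
	else if (tmp & RLCG_WRONG_OPERATION_TYPE)
		pr_err("wrong operation type, reg 0x%05x\n", offset);
	else if (tmp & RLCG_NOT_IN_RANGE)
		pr_err("register not in range, reg 0x%05x\n", offset);
	else
		pr_err("timeout, reg 0x%05x\n", offset);
	return readl(scratch0);
}
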
@@ -1538,7 +1589,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
return ret;
}

static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
{
u32 rlcg_flag;

@@ -1554,7 +1605,7 @@ static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value,
WREG32(offset, value);
}

static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
{
u32 rlcg_flag;

@@ -3488,12 +3539,51 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_FAST_CLKS, 0x3fffffff, 0x0000493e),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x3c000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0xa0000000, 0xa0000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x00008000, 0x003c8014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_DRAM_BURST_CTRL, 0x00000010, 0x00000017),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xd8d8d8d8),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000003),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00009d00, 0x00008500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0xffffffff, 0x000fffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_DRAM_BURST_CTRL, 0x00000010, 0x00000017),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xfcfcfcfc, 0xd8d8d8d8),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77707770, 0x21302130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77707770, 0x21302130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xfc02002f, 0x9402002f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0x00002188, 0x00000188),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x08000009, 0x08000009),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xcc3fcc03, 0x842a4c02),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffff3109, 0xffff3101),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x00030008, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};

#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

/* TODO: pending on golden setting value of gb address config */
#define CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN 0x00100044

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -3718,6 +3808,14 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_3_5,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
break;
case CHIP_CYAN_SKILLFISH:
soc15_program_register_sequence(adev,
golden_settings_gc_10_0,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0));
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
break;
default:
break;
}
@@ -3891,6 +3989,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_NAVI14:
case CHIP_CYAN_SKILLFISH:
if ((adev->gfx.me_fw_version >= 0x00000046) &&
(adev->gfx.me_feature_version >= 27) &&
(adev->gfx.pfp_fw_version >= 0x00000068) &&
@@ -4025,6 +4124,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
case CHIP_YELLOW_CARP:
chip_name = "yellow_carp";
break;
case CHIP_CYAN_SKILLFISH:
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
chip_name = "cyan_skillfish";
break;
default:
BUG();
}
@@ -4604,6 +4709,14 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config_fields.num_pkrs =
1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
break;
case CHIP_CYAN_SKILLFISH:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = CYAN_SKILLFISH_GB_ADDR_CONFIG_GOLDEN;
break;
default:
BUG();
break;
@@ -4708,6 +4821,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_CYAN_SKILLFISH:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -5319,7 +5433,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
int r;

if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
adev->psp.autoload_supported) {

r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
@@ -5379,7 +5494,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
int ret;
RLC_TABLE_OF_CONTENT *rlc_toc;

ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
ret = amdgpu_bo_create_reserved(adev, adev->psp.toc.size_bytes, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.rlc_toc_bo,
&adev->gfx.rlc.rlc_toc_gpu_addr,
@@ -5390,7 +5505,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
}

/* Copy toc from psp sos fw to rlc toc buffer */
memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc.start_addr, adev->psp.toc.size_bytes);

rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
@@ -7608,10 +7723,8 @@ static int gfx_v10_0_soft_reset(void *handle)

static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
uint64_t clock, clock_lo, clock_hi, hi_check;

amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
switch (adev->asic_type) {
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
@@ -7619,12 +7732,21 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
break;
default:
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER);
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
* roughly every 42 seconds.
*/
if (hi_check != clock_hi) {
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER);
clock_hi = hi_check;
}
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
}

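/*
 * The upper/lower/upper read sequence above is the standard way to sample
 * a 64-bit counter through two 32-bit registers: if the high word changed
 * between the two reads, the low word may have wrapped mid-sample, so it
 * is re-read under the new high word. With the 100 MHz SMUIO TSC, the low
 * word wraps every 2^32 / 10^8 =~ 42.9 s, hence the comment in the hunk.
 * A generic sketch of the pattern, assuming a hypothetical pair of 32-bit
 * register accessors:
 */
#include <linux/types.h>

extern u32 read_counter_hi(void);	/* hypothetical register reads */
extern u32 read_counter_lo(void);

static u64 read_counter64(void)
{
	u32 hi = read_counter_hi();
	u32 lo = read_counter_lo();
	u32 hi_check = read_counter_hi();

	/* The low word wrapped between the two high reads: resample it so
	 * lo is consistent with the new high word.
	 */
	if (hi_check != hi) {
		lo = read_counter_lo();
		hi = hi_check;
	}
	return ((u64)hi << 32) | lo;
}
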
@@ -7665,6 +7787,7 @@ static int gfx_v10_0_early_init(void *handle)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_CYAN_SKILLFISH:
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
break;
case CHIP_SIENNA_CICHLID:
@@ -8261,8 +8384,8 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
.rlcg_rreg = gfx_v10_rlcg_rreg,
.sriov_wreg = gfx_v10_sriov_wreg,
.sriov_rreg = gfx_v10_sriov_rreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};

@@ -9425,6 +9548,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case CHIP_NAVI12:

@@ -787,7 +787,7 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f

}

static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
u32 v, u32 acc_flags, u32 hwip)
{
if ((acc_flags & AMDGPU_REGS_RLC) &&
@@ -5131,7 +5131,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.reset = gfx_v9_0_rlc_reset,
.start = gfx_v9_0_rlc_start,
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
.rlcg_wreg = gfx_v9_0_rlcg_wreg,
.sriov_wreg = gfx_v9_0_sriov_wreg,
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};

@@ -810,6 +810,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -879,6 +880,7 @@ static int gmc_v10_0_sw_init(void *handle)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
@@ -996,6 +998,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
break;
default:
break;

@@ -53,6 +53,7 @@
#include "mmhub_v1_7.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "hdp_v4_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
@@ -1168,6 +1169,18 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
case CHIP_ALDEBARAN:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
if (!adev->gmc.xgmi.connected_to_cpu)
adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
if (1 & adev->smuio.funcs->get_die_id(adev))
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
else
adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
break;
default:
break;
}

@@ -508,6 +508,26 @@ static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev
WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
}

static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev)
{
uint32_t reg, reg_data;

if (adev->asic_type != CHIP_SIENNA_CICHLID)
return;

reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);

/* Clear Interrupt Status
*/
if ((reg & BIF_RB_CNTL__RB_ENABLE_MASK) == 0) {
reg = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (reg & BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK) {
reg_data = 1 << BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT;
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, reg_data);
}
}
}

const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
@@ -531,4 +551,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.program_aspm = nbio_v2_3_program_aspm,
.apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
.apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
.clear_doorbell_interrupt = nbio_v2_3_clear_doorbell_interrupt,
};

@@ -666,6 +666,9 @@ legacy_init:
case CHIP_YELLOW_CARP:
yellow_carp_reg_base_init(adev);
break;
case CHIP_CYAN_SKILLFISH:
cyan_skillfish_reg_base_init(adev);
break;
default:
return -EINVAL;
}
@@ -682,7 +685,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;

if (adev->flags & AMD_IS_APU) {
if (adev->asic_type == CHIP_CYAN_SKILLFISH) {
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
} else if (adev->flags & AMD_IS_APU) {
adev->nbio.funcs = &nbio_v7_2_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
} else {
@@ -889,6 +895,20 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
break;
case CHIP_CYAN_SKILLFISH:
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
}
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
break;
default:
return -EINVAL;
}
@@ -1241,6 +1261,11 @@ static int nv_common_early_init(void *handle)
else
adev->external_rev_id = adev->rev_id + 0x01;
break;
case CHIP_CYAN_SKILLFISH:
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x82;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;

@@ -38,5 +38,6 @@ void vangogh_reg_base_init(struct amdgpu_device *adev);
int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
int beige_goby_reg_base_init(struct amdgpu_device *adev);
int yellow_carp_reg_base_init(struct amdgpu_device *adev);
int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);

#endif

@@ -80,6 +80,9 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240

/* Read USB-PD from LFB */
#define GFX_CMD_USB_PD_USE_LFB 0x480

static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -284,7 +287,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
return ret;

/* Copy PSP KDB binary to memory */
psp_copy_fw(psp, psp->kdb_start_addr, psp->kdb_bin_size);
psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes);

/* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -315,7 +318,7 @@ static int psp_v11_0_bootloader_load_spl(struct psp_context *psp)
return ret;

/* Copy PSP SPL binary to memory */
psp_copy_fw(psp, psp->spl_start_addr, psp->spl_bin_size);
psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes);

/* Provide the PSP SPL to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -346,7 +349,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
return ret;

/* Copy PSP System Driver binary to memory */
psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);

/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -380,7 +383,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;

/* Copy Secure OS binary to PSP memory */
psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes);

/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -753,44 +756,26 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}

static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
uint32_t reg_status;
int ret, i = 0;

/* Write lower 32-bit address of the PD Controller FW */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret)
return ret;

/* Fireup interrupt so PSP can pick up the lower address */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret)
return ret;

reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);

if ((reg_status & 0xFFFF) != 0) {
DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
reg_status & 0xFFFF);
return -EIO;
}

/* Write upper 32-bit address of the PD Controller FW */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
/*
* LFB address which is aligned to 1MB address and has to be
* right-shifted by 20 so that LFB address can be passed on a 32-bit C2P
* register
*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));

ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret)
return ret;

/* Fireup interrupt so PSP can pick up the upper address */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
/* Fireup interrupt so PSP can pick up the address */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));

/* FW load takes very long time */
do {
@@ -806,7 +791,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_add
done:

if ((reg_status & 0xFFFF) != 0) {
DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
reg_status & 0xFFFF);
return -EIO;
}

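/*
 * The old path above handed the PSP a DMA address in two 32-bit halves with
 * two interrupts; the new path copies the firmware into the local frame
 * buffer and passes a single 1MB-aligned LFB address, right-shifted by 20
 * bits so it fits one 32-bit C2P register, with the 0x480 command carried
 * in the upper half of C2PMSG_35. A small sketch of that packing, under the
 * stated alignment assumption (helper names are illustrative):
 */
#include <linux/types.h>
#include <linux/bug.h>

#define GFX_CMD_USB_PD_USE_LFB	0x480

/* Pack a 1MB-aligned LFB address into a 32-bit C2P register value */
static u32 lfb_addr_to_c2p(u64 fw_pri_mc_addr)
{
	WARN_ON(fw_pri_mc_addr & ((1ULL << 20) - 1)); /* must be 1MB aligned */
	return (u32)(fw_pri_mc_addr >> 20);
}

/* The command lives in bits [31:16] of the message register */
static u32 usb_pd_cmd_to_c2p(void)
{
	return GFX_CMD_USB_PD_USE_LFB << 16;
}
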
@@ -0,0 +1,208 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0_8.h"

#include "mp/mp_11_0_8_offset.h"

static int psp_v11_0_8_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;

ring = &psp->km_ring;

ring->ring_type = ring_type;

/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}

return 0;
}

static int psp_v11_0_8_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct amdgpu_device *adev = psp->adev;

if (amdgpu_sriov_vf(adev)) {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
}

return ret;
}

static int psp_v11_0_8_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;

if (amdgpu_sriov_vf(adev)) {
ret = psp_v11_0_8_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v11_0_8_ring_stop_sriov failed!\n");
return ret;
}

/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
/* Write high address of the ring to C2PMSG_103 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);

/* Write the ring initialization command to C2PMSG_101 */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_INIT_GPCOM_RING);

/* there might be handshake issue with hardware which needs delay */
mdelay(20);

/* Wait for response flag (bit 31) in C2PMSG_101 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x8000FFFF, false);

} else {
/* Wait for sOS ready for ring creation */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
}

/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
/* Write high address of the ring to C2PMSG_70 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
/* Write size of ring to C2PMSG_71 */
psp_ring_reg = ring->ring_size;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
/* Write the ring initialization command to C2PMSG_64 */
psp_ring_reg = ring_type;
psp_ring_reg = psp_ring_reg << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);

/* there might be handshake issue with hardware which needs delay */
mdelay(20);

/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x8000FFFF, false);
}

return ret;
}

static int psp_v11_0_8_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;

ret = psp_v11_0_8_ring_stop(psp, ring_type);
if (ret)
DRM_ERROR("Fail to stop psp ring\n");

amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);

return ret;
}

static uint32_t psp_v11_0_8_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;

if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);

return data;
}

static void psp_v11_0_8_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;

if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}

static const struct psp_funcs psp_v11_0_8_funcs = {
.ring_init = psp_v11_0_8_ring_init,
.ring_create = psp_v11_0_8_ring_create,
.ring_stop = psp_v11_0_8_ring_stop,
.ring_destroy = psp_v11_0_8_ring_destroy,
.ring_get_wptr = psp_v11_0_8_ring_get_wptr,
.ring_set_wptr = psp_v11_0_8_ring_set_wptr,
};

void psp_v11_0_8_set_psp_funcs(struct psp_context *psp)
{
psp->funcs = &psp_v11_0_8_funcs;
}

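/*
 * Every ring operation in the new psp_v11_0_8 file above follows the same
 * handshake: write the command or address into a C2PMSG mailbox register,
 * wait out a possible hardware handshake delay, then poll until bit 31 of
 * the message register flips to signal the PSP's response (psp_wait_for
 * with mask 0x80000000, or 0x8000FFFF when the low 16 bits must also read
 * back as a zero status). A generic sketch of that poll loop, with a
 * hypothetical read_c2pmsg accessor in place of the SOC15 register macros:
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

extern u32 read_c2pmsg(void);	/* hypothetical mailbox register read */

static int wait_response_bit(u32 expected, u32 mask, unsigned int tries)
{
	/* Done when the masked register matches the masked expected value */
	while (tries--) {
		if ((read_c2pmsg() & mask) == (expected & mask))
			return 0;
		udelay(1);
	}
	return -ETIME;
}
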
@@ -0,0 +1,30 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __PSP_V11_0_8_H__
#define __PSP_V11_0_8_H__

#include "amdgpu_psp.h"

void psp_v11_0_8_set_psp_funcs(struct psp_context *psp);

#endif

@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)

err = psp_init_asd_microcode(psp, chip_name);
if (err)
goto out;
return err;

snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
} else {
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
goto out2;
goto out;

ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)

return 0;

out2:
out:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
"psp v12.0: Failed to load firmware \"%s\"\n",
@@ -139,7 +138,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
return ret;

/* Copy PSP System Driver binary to memory */
psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);

/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -178,7 +177,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
return ret;

/* Copy Secure OS binary to PSP memory */
psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes);

/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,

@@ -35,6 +35,12 @@ MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");

/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240

/* Read USB-PD from LFB */
#define GFX_CMD_USB_PD_USE_LFB 0x480

static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -111,7 +117,9 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
return ret;
}

static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
static int psp_v13_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
@@ -130,12 +138,12 @@ static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);

/* Copy PSP KDB binary to memory */
memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);

/* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
psp_gfxdrv_command_reg = bl_cmd;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);

@@ -144,40 +152,29 @@ static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
return ret;
}

static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
{
return psp_v13_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
}

static int psp_v13_0_bootloader_load_sysdrv(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
return psp_v13_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
}

/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
if (psp_v13_0_is_sos_alive(psp))
return 0;
static int psp_v13_0_bootloader_load_soc_drv(struct psp_context *psp)
{
return psp_v13_0_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
}

ret = psp_v13_0_wait_for_bootloader(psp);
if (ret)
return ret;
static int psp_v13_0_bootloader_load_intf_drv(struct psp_context *psp)
{
return psp_v13_0_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
}

memset(psp->fw_pri_buf, 0, PSP_1_MEG);

/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);

/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);

/* there might be handshake issue with hardware which needs delay */
mdelay(20);

ret = psp_v13_0_wait_for_bootloader(psp);

return ret;
static int psp_v13_0_bootloader_load_dbg_drv(struct psp_context *psp)
{
return psp_v13_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}

static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
@@ -199,7 +196,7 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
memset(psp->fw_pri_buf, 0, PSP_1_MEG);

/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);

/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
@@ -382,10 +379,71 @@ static void psp_v13_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
}

static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc_addr)
{
struct amdgpu_device *adev = psp->adev;
uint32_t reg_status;
int ret, i = 0;

/*
* LFB address which is aligned to 1MB address and has to be
* right-shifted by 20 so that LFB address can be passed on a 32-bit C2P
* register
*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));

ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (ret)
return ret;

/* Fireup interrupt so PSP can pick up the address */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, (GFX_CMD_USB_PD_USE_LFB << 16));

/* FW load takes very long time */
do {
msleep(1000);
reg_status = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35);

if (reg_status & 0x80000000)
goto done;

} while (++i < USBC_PD_POLLING_LIMIT_S);

return -ETIME;
done:

if ((reg_status & 0xFFFF) != 0) {
DRM_ERROR("Address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %04x\n",
reg_status & 0xFFFF);
return -EIO;
}

return 0;
}

static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
struct amdgpu_device *adev = psp->adev;
int ret;

WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);

ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);

return ret;
}

static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
.bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
.bootloader_load_soc_drv = psp_v13_0_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
@@ -393,6 +451,8 @@ static const struct psp_funcs psp_v13_0_funcs = {
.ring_destroy = psp_v13_0_ring_destroy,
.ring_get_wptr = psp_v13_0_ring_get_wptr,
.ring_set_wptr = psp_v13_0_ring_set_wptr,
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw
};

void psp_v13_0_set_psp_funcs(struct psp_context *psp)

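/*
 * The psp_v13_0 rework above is a classic parameterization: five nearly
 * identical bootloader entry points collapse into one
 * psp_v13_0_bootloader_load_component() that takes the binary descriptor
 * plus a bootloader command, and each component keeps a one-line wrapper.
 * A minimal sketch of the shape, with hypothetical stand-ins for
 * psp_bin_desc and the command enum:
 */
#include <linux/types.h>

struct bin_desc {
	const void *start_addr;
	u32 size_bytes;
};

enum bl_cmd { BL_LOAD_KEY_DATABASE, BL_LOAD_SYSDRV, BL_LOAD_SOCDRV };

/* One copy of the copy-then-command sequence, shared by all components */
static int load_component(const struct bin_desc *bin, enum bl_cmd cmd)
{
	/* ... memcpy bin->start_addr / bin->size_bytes, then issue cmd ... */
	(void)bin;
	(void)cmd;
	return 0;
}

/* Each former per-component loader reduces to a one-line wrapper */
static int load_kdb(const struct bin_desc *kdb)
{
	return load_component(kdb, BL_LOAD_KEY_DATABASE);
}
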
@@ -103,7 +103,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
return ret;

/* Copy PSP System Driver binary to memory */
psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size);
psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);

/* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
@@ -142,7 +142,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
return ret;

/* Copy Secure OS binary to PSP memory */
psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size);
psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes);

/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,

@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");

MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma1.bin");

MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
@@ -130,6 +136,37 @@ static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
};

static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
u32 base;
@@ -180,6 +217,11 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
break;
case CHIP_CYAN_SKILLFISH:
soc15_program_register_sequence(adev,
golden_settings_sdma_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
break;
default:
break;
}
@@ -200,7 +242,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
char fw_name[40];
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
@@ -221,6 +263,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
case CHIP_NAVI12:
chip_name = "navi12";
break;
case CHIP_CYAN_SKILLFISH:
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
chip_name = "cyan_skillfish";
break;
default:
BUG();
}

@@ -87,21 +87,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}

static void sdma_v5_2_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
break;
default:
break;
}
}

static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
int err = 0;

@@ -1345,8 +1330,6 @@ static int sdma_v5_2_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

sdma_v5_2_init_golden_registers(adev);

r = sdma_v5_2_start(adev);

return r;
@@ -41,9 +41,7 @@
#define I2C_SW_TIMEOUT 8
#define I2C_ABORT 0x10

/* I2C transaction flags */
#define I2C_NO_STOP 1
#define I2C_RESTART 2
#define I2C_X_RESTART BIT(31)

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

@@ -56,12 +54,48 @@ static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
WREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT, reg);
}

/* The T_I2C_POLL_US is defined as follows:
*
* "Define a timer interval (t_i2c_poll) equal to 10 times the
* signalling period for the highest I2C transfer speed used in the
* system and supported by DW_apb_i2c. For instance, if the highest
* I2C data transfer mode is 400 kb/s, then t_i2c_poll is 25 us." --
* DesignWare DW_apb_i2c Databook, Version 1.21a, section 3.8.3.1,
* page 56, with grammar and syntax corrections.
*
* Vcc for our device is at 1.8V which puts it at 400 kHz,
* see Atmel AT24CM02 datasheet, section 8.3 DC Characteristics table, page 14.
*
* The procedure to disable the IP block is described in section
* 3.8.3 Disabling DW_apb_i2c on page 56.
*/
#define I2C_SPEED_MODE_FAST 2
#define T_I2C_POLL_US 25
#define I2C_MAX_T_POLL_COUNT 1000
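A quick arithmetic check of the 25 us figure above, as a standalone sketch (not part of the commit); it just replays the databook rule "t_i2c_poll = 10 signalling periods at the fastest supported speed":

#include <stdio.h>

int main(void)
{
	/* Fast mode: 400 kb/s, so one signalling period is 2500 ns */
	unsigned int bus_speed_hz = 400000;
	unsigned int period_ns = 1000000000u / bus_speed_hz;
	unsigned int t_i2c_poll_us = 10 * period_ns / 1000;

	/* 25 us; with I2C_MAX_T_POLL_COUNT = 1000 the driver polls
	 * IC_ENABLE_STATUS.IC_EN for at most 25 ms before giving up.
	 */
	printf("t_i2c_poll = %u us\n", t_i2c_poll_us);
	return 0;
}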
static void smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
{
struct amdgpu_device *adev = to_amdgpu_device(control);

WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0);

if (!enable) {
int ii;

for (ii = I2C_MAX_T_POLL_COUNT; ii > 0; ii--) {
u32 en_stat = RREG32_SOC15(SMUIO,
0,
mmCKSVII2C_IC_ENABLE_STATUS);
if (REG_GET_FIELD(en_stat, CKSVII2C_IC_ENABLE_STATUS, IC_EN))
udelay(T_I2C_POLL_US);
else
return I2C_OK;
}

return I2C_ABORT;
}

return I2C_OK;
}

static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)

@@ -83,8 +117,13 @@ static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_RESTART_EN, 1);
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_MASTER, 0);
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_10BITADDR_SLAVE, 0);
/* Standard mode */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MAX_SPEED_MODE, 2);
/* The values of IC_MAX_SPEED_MODE are,
* 1: standard mode, 0 - 100 Kb/s,
* 2: fast mode, <= 400 Kb/s, or fast mode plus, <= 1000 Kb/s,
* 3: high speed mode, <= 3.4 Mb/s.
*/
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MAX_SPEED_MODE,
I2C_SPEED_MODE_FAST);
reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_MASTER_MODE, 1);

WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CON, reg);

@@ -113,13 +152,15 @@ static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_SDA_HOLD, 20);
}

static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, uint8_t address)
static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address)
{
struct amdgpu_device *adev = to_amdgpu_device(control);

/* Convert from 8-bit to 7-bit address */
address >>= 1;
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TAR, (address & 0xFF));
/* The IC_TAR::IC_TAR field is 10-bits wide.
* It takes a 7-bit or 10-bit address,
* i.e. no read/write bit--no wire format, just the address.
*/
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TAR, address & 0x3FF);
}
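To make the two calling conventions above concrete, here is a standalone sketch (not part of the commit) contrasting the legacy 8-bit "wire" address the old helper expected with the bare 7-bit struct i2c_msg address the reworked helper takes; the 0xA0/0x50 EEPROM address is only an example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t wire_addr = 0xA0;            /* 7-bit 0x50 shifted left, R/W bit = 0 */
	uint16_t bare_addr = wire_addr >> 1; /* what IC_TAR::IC_TAR actually wants */

	assert(bare_addr == 0x50);
	assert((bare_addr & 0x3FF) == bare_addr); /* fits the 10-bit field */
	return 0;
}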
static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)

@@ -206,9 +247,6 @@ static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control)
return ret;
}

/**
* smu_v11_0_i2c_transmit - Send a block of data over the I2C bus to a slave device.
*

@@ -221,17 +259,17 @@ static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control)
* Returns 0 on success or error.
*/
static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
uint8_t address, uint8_t *data,
uint32_t numbytes, uint32_t i2c_flag)
u16 address, u8 *data,
u32 numbytes, u32 i2c_flag)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
uint32_t bytes_sent, reg, ret = 0;
u32 bytes_sent, reg, ret = I2C_OK;
unsigned long timeout_counter;

bytes_sent = 0;

DRM_DEBUG_DRIVER("I2C_Transmit(), address = %x, bytes = %d , data: ",
(uint16_t)address, numbytes);
address, numbytes);

if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "data: ", DUMP_PREFIX_NONE,

@@ -246,53 +284,49 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
/* Clear status bits */
smu_v11_0_i2c_clear_status(control);

timeout_counter = jiffies + msecs_to_jiffies(20);

while (numbytes > 0) {
reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
if (REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)) {
do {
reg = 0;
/*
* Prepare transaction, no need to set RESTART. I2C engine will send
* START as soon as it sees data in TXFIFO
*/
if (bytes_sent == 0)
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART,
(i2c_flag & I2C_RESTART) ? 1 : 0);
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, data[bytes_sent]);
if (!REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF)) {
/*
* We waited for too long for the transmission
* FIFO to become not-full. Exit the loop
* with error.
*/
if (time_after(jiffies, timeout_counter)) {
ret |= I2C_SW_TIMEOUT;
goto Err;
}
} else {
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT,
data[bytes_sent]);

/* determine if we need to send STOP bit or not */
if (numbytes == 1)
/* Final transaction, so send stop unless I2C_NO_STOP */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP,
(i2c_flag & I2C_NO_STOP) ? 0 : 1);
/* Write */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 0);
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg);
/* Final message, final byte, must generate a
* STOP to release the bus, i.e. don't hold
* SCL low.
*/
if (numbytes == 1 && i2c_flag & I2C_M_STOP)
reg = REG_SET_FIELD(reg,
CKSVII2C_IC_DATA_CMD,
STOP, 1);

/* Record that the bytes were transmitted */
bytes_sent++;
numbytes--;
if (bytes_sent == 0 && i2c_flag & I2C_X_RESTART)
reg = REG_SET_FIELD(reg,
CKSVII2C_IC_DATA_CMD,
RESTART, 1);

reg = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
/* Write */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 0);
WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg);

} while (numbytes && REG_GET_FIELD(reg, CKSVII2C_IC_STATUS, TFNF));
}

/*
* We waited too long for the transmission FIFO to become not-full.
* Exit the loop with error.
*/
if (time_after(jiffies, timeout_counter)) {
ret |= I2C_SW_TIMEOUT;
goto Err;
/* Record that the bytes were transmitted */
bytes_sent++;
numbytes--;
}
}

ret = smu_v11_0_i2c_poll_tx_status(control);

Err:
/* Any error, no point in proceeding */
if (ret != I2C_OK) {
@@ -323,8 +357,8 @@ Err:
* Returns 0 on success or error.
*/
static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
uint8_t address, uint8_t *data,
uint32_t numbytes, uint8_t i2c_flag)
u16 address, u8 *data,
u32 numbytes, u32 i2c_flag)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
uint32_t bytes_received, ret = I2C_OK;

@@ -342,23 +376,21 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
smu_v11_0_i2c_clear_status(control);

/* Prepare transaction */
/* Each time we disable I2C, so this is not a restart */
if (bytes_received == 0)
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, RESTART,
(i2c_flag & I2C_RESTART) ? 1 : 0);

reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, DAT, 0);
/* Read */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, CMD, 1);

/* Transmitting last byte */
if (numbytes == 1)
/* Final transaction, so send stop if requested */
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD, STOP,
(i2c_flag & I2C_NO_STOP) ? 0 : 1);
/* Final message, final byte, must generate a STOP
* to release the bus, i.e. don't hold SCL low.
*/
if (numbytes == 1 && i2c_flag & I2C_M_STOP)
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD,
STOP, 1);

if (bytes_received == 0 && i2c_flag & I2C_X_RESTART)
reg = REG_SET_FIELD(reg, CKSVII2C_IC_DATA_CMD,
RESTART, 1);

WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_DATA_CMD, reg);

@@ -413,7 +445,6 @@ static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
DRM_DEBUG_DRIVER("I2C_Abort() Done.");
}

static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);

@@ -425,7 +456,6 @@ static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS);
reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE);

if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) &&
(REG_GET_FIELD(reg_ic_enable_status, CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) {
/*

@@ -455,6 +485,8 @@ static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
static void smu_v11_0_i2c_init(struct i2c_adapter *control)
{
int res;

/* Disable clock gating */
smu_v11_0_i2c_set_clock_gating(control, false);

@@ -462,7 +494,9 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control)
DRM_WARN("I2C busy!");

/* Disable I2C */
smu_v11_0_i2c_enable(control, false);
res = smu_v11_0_i2c_enable(control, false);
if (res != I2C_OK)
smu_v11_0_i2c_abort(control);

/* Configure I2C to operate as master and in standard mode */
smu_v11_0_i2c_configure(control);

@@ -475,21 +509,22 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control)
static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
uint32_t reg_ic_enable_status, reg_ic_enable;
u32 status, enable, en_stat;
int res;

smu_v11_0_i2c_enable(control, false);
res = smu_v11_0_i2c_enable(control, false);
if (res != I2C_OK) {
status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_STATUS);
enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE);
en_stat = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS);

/* Double check if disabled, else force abort */
reg_ic_enable_status = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE_STATUS);
reg_ic_enable = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE);

if ((REG_GET_FIELD(reg_ic_enable, CKSVII2C_IC_ENABLE, ENABLE) == 0) &&
(REG_GET_FIELD(reg_ic_enable_status,
CKSVII2C_IC_ENABLE_STATUS, IC_EN) == 1)) {
/*
* Nobody is using I2C engine, but engine remains active because
* someone failed to send STOP
/* Nobody is using the I2C engine, yet it remains
* active, possibly because someone failed to send
* STOP.
*/
DRM_DEBUG_DRIVER("Aborting from fini: status:0x%08x "
"enable:0x%08x enable_stat:0x%08x",
status, enable, en_stat);
smu_v11_0_i2c_abort(control);
}
@@ -531,22 +566,12 @@ static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
/***************************** I2C GLUE ****************************/

static uint32_t smu_v11_0_i2c_read_data(struct i2c_adapter *control,
uint8_t address,
uint8_t *data,
uint32_t numbytes)
struct i2c_msg *msg, uint32_t i2c_flag)
{
uint32_t ret = 0;
uint32_t ret;

/* First 2 bytes are dummy write to set EEPROM address */
ret = smu_v11_0_i2c_transmit(control, address, data, 2, I2C_NO_STOP);
if (ret != I2C_OK)
goto Fail;
ret = smu_v11_0_i2c_receive(control, msg->addr, msg->buf, msg->len, i2c_flag);

/* Now read data starting with that address */
ret = smu_v11_0_i2c_receive(control, address, data + 2, numbytes - 2,
I2C_RESTART);

Fail:
if (ret != I2C_OK)
DRM_ERROR("ReadData() - I2C error occurred :%x", ret);

@@ -554,28 +579,15 @@ Fail:
}

static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
uint8_t address,
uint8_t *data,
uint32_t numbytes)
struct i2c_msg *msg, uint32_t i2c_flag)
{
uint32_t ret;

ret = smu_v11_0_i2c_transmit(control, address, data, numbytes, 0);
ret = smu_v11_0_i2c_transmit(control, msg->addr, msg->buf, msg->len, i2c_flag);

if (ret != I2C_OK)
DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret);
else
/*
* According to EEPROM spec there is a MAX of 10 ms required for
* EEPROM to flush internal RX buffer after STOP was issued at the
* end of write transaction. During this time the EEPROM will not be
* responsive to any more commands - so wait a bit more.
*
* TODO Improve to wait for first ACK for slave address after
* internal write cycle done.
*/
msleep(10);

return ret;
}
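The removed TODO above hints at ACK polling instead of a fixed msleep(10). A hedged sketch of what that could look like (not from the commit; it reuses this file's smu_v11_0_i2c_transmit() and status codes, and the zero-byte probe write is an assumption about the EEPROM's behaviour):

static uint32_t smu_v11_0_i2c_ack_poll(struct i2c_adapter *control, u16 addr)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);
	uint32_t ret;

	do {
		/* Address the device with a zero-byte write; it only
		 * ACKs once its internal write cycle has completed.
		 */
		ret = smu_v11_0_i2c_transmit(control, addr, NULL, 0, I2C_M_STOP);
		if (ret == I2C_OK)
			return I2C_OK;
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	return I2C_SW_TIMEOUT;
}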
@@ -584,12 +596,11 @@ static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c);

if (!smu_v11_0_i2c_bus_lock(i2c)) {
mutex_lock(&adev->pm.smu_i2c_mutex);
if (!smu_v11_0_i2c_bus_lock(i2c))
DRM_ERROR("Failed to lock the bus from SMU");
return;
}

adev->pm.bus_locked = true;
else
adev->pm.bus_locked = true;
}

static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)

@@ -602,12 +613,11 @@ static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
struct amdgpu_device *adev = to_amdgpu_device(i2c);

if (!smu_v11_0_i2c_bus_unlock(i2c)) {
if (!smu_v11_0_i2c_bus_unlock(i2c))
DRM_ERROR("Failed to unlock the bus from SMU");
return;
}

adev->pm.bus_locked = false;
else
adev->pm.bus_locked = false;
mutex_unlock(&adev->pm.smu_i2c_mutex);
}

static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {

@@ -617,27 +627,60 @@ static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
};

static int smu_v11_0_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
struct i2c_msg *msg, int num)
{
int i, ret;
struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);

if (!adev->pm.bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
return -EIO;
}
u16 addr, dir;

smu_v11_0_i2c_init(i2c_adap);

/* From the client's point of view, this sequence of
* messages-- the array i2c_msg *msg, is a single transaction
* on the bus, starting with START and ending with STOP.
*
* The client is welcome to send any sequence of messages in
* this array, as processing under this function here is
* striving to be agnostic.
*
* Record the first address and direction we see. If either
* changes for a subsequent message, generate ReSTART. The
* DW_apb_i2c databook, v1.21a, specifies that ReSTART is
* generated when the direction changes, with the default IP
* block parameter settings, but it doesn't specify if ReSTART
* is generated when the address changes (possibly...). We
* don't rely on the default IP block parameter settings as
* the block is shared and they may change.
*/
if (num > 0) {
addr = msg[0].addr;
dir = msg[0].flags & I2C_M_RD;
}

for (i = 0; i < num; i++) {
if (msgs[i].flags & I2C_M_RD)
u32 i2c_flag = 0;

if (msg[i].addr != addr || (msg[i].flags ^ dir) & I2C_M_RD) {
addr = msg[i].addr;
dir = msg[i].flags & I2C_M_RD;
i2c_flag |= I2C_X_RESTART;
}

if (i == num - 1) {
/* Set the STOP bit on the last message, so
* that the IP block generates a STOP after
* the last byte of the message.
*/
i2c_flag |= I2C_M_STOP;
}

if (msg[i].flags & I2C_M_RD)
ret = smu_v11_0_i2c_read_data(i2c_adap,
(uint8_t)msgs[i].addr,
msgs[i].buf, msgs[i].len);
msg + i,
i2c_flag);
else
ret = smu_v11_0_i2c_write_data(i2c_adap,
(uint8_t)msgs[i].addr,
msgs[i].buf, msgs[i].len);
msg + i,
i2c_flag);

if (ret != I2C_OK) {
num = -EIO;
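As an illustration of the transaction model described in the comment above (not part of the commit), a client-side EEPROM "random read" is two messages: a write that sets the in-chip address, then a read. With the logic above, the second message flips the direction bit and is therefore sent with I2C_X_RESTART, and being last it also carries I2C_M_STOP. The 0x50 slave address and two-byte offset are assumptions for the example:

u8 offset[2] = { 0x01, 0x00 };	/* in-EEPROM address, big-endian */
u8 payload[8];
struct i2c_msg eeprom_read[] = {
	{ .addr = 0x50, .flags = 0,        .len = 2, .buf = offset  },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = 8, .buf = payload },
};

/* ret = i2c_transfer(&adev->pm.smu_i2c, eeprom_read, ARRAY_SIZE(eeprom_read)); */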
@@ -654,23 +697,28 @@ static u32 smu_v11_0_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v11_0_i2c_algo = {
.master_xfer = smu_v11_0_i2c_xfer,
.functionality = smu_v11_0_i2c_func,
};

static const struct i2c_adapter_quirks smu_v11_0_i2c_control_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};

int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
int res;

mutex_init(&adev->pm.smu_i2c_mutex);
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->class = I2C_CLASS_HWMON;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
control->quirks = &smu_v11_0_i2c_control_quirks;

res = i2c_add_adapter(control);
if (res)
@@ -575,7 +575,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
baco_reset = amdgpu_dpm_is_baco_supported(adev);
break;
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
if (adev->psp.sos.fw_version >= 0x80067)
baco_reset = amdgpu_dpm_is_baco_supported(adev);

/*

@@ -635,7 +635,7 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
return amdgpu_dpm_is_baco_supported(adev);
case CHIP_VEGA20:
if (adev->psp.sos_fw_version >= 0x80067)
if (adev->psp.sos.fw_version >= 0x80067)
return amdgpu_dpm_is_baco_supported(adev);
return false;
default:
@@ -28,13 +28,13 @@
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_wreg) ? \
adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \
adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \
WREG32(reg, value))

#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_rreg) ? \
adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? \
adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \
RREG32(reg))

#define WREG32_FIELD15(ip, idx, reg, field, val) \
@@ -33,7 +33,8 @@ enum ta_command_xgmi {
TA_COMMAND_XGMI__GET_NODE_ID = 0x01,
TA_COMMAND_XGMI__GET_HIVE_ID = 0x02,
TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03,
TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04
TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04,
TA_COMMAND_XGMI__GET_PEER_LINKS = 0x0B
};

/* XGMI related enumerations */

@@ -75,6 +76,11 @@ struct ta_xgmi_node_info {
enum ta_xgmi_assigned_sdma_engine sdma_engine;
};

struct ta_xgmi_peer_link_info {
uint64_t node_id;
uint8_t num_links;
};

struct ta_xgmi_cmd_initialize_output {
uint32_t status;
};

@@ -97,6 +103,11 @@ struct ta_xgmi_cmd_get_topology_info_output {
struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

struct ta_xgmi_cmd_get_peer_link_info_output {
uint32_t num_nodes;
struct ta_xgmi_peer_link_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
};

struct ta_xgmi_cmd_set_topology_info_input {
uint32_t num_nodes;
struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];

@@ -115,6 +126,7 @@ union ta_xgmi_cmd_output {
struct ta_xgmi_cmd_get_node_id_output get_node_id;
struct ta_xgmi_cmd_get_hive_id_output get_hive_id;
struct ta_xgmi_cmd_get_topology_info_output get_topology_info;
struct ta_xgmi_cmd_get_peer_link_info_output get_link_info;
};
/**********************************************************/
@@ -28,6 +28,21 @@
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

const uint32_t
umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
{28, 12, 6, 22}, {19, 3, 9, 25},
{20, 4, 30, 14}, {11, 27, 1, 17},
{24, 8, 2, 18}, {15, 31, 5, 21},
{16, 0, 26, 10}, {7, 23, 29, 13}
};
const uint32_t
umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
{19, 3, 9, 25}, {28, 12, 6, 22},
{11, 27, 1, 17}, {20, 4, 30, 14},
{15, 31, 5, 21}, {24, 8, 2, 18},
{7, 23, 29, 13}, {16, 0, 26, 10}
};

static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
uint32_t umc_inst,
uint32_t ch_inst)
@@ -23,6 +23,9 @@
#ifndef __UMC_V6_7_H__
#define __UMC_V6_7_H__

#include "soc15_common.h"
#include "amdgpu.h"

/* EccErrCnt max value */
#define UMC_V6_7_CE_CNT_MAX 0xffff
/* umc ce interrupt threshold */

@@ -32,6 +35,18 @@
#define UMC_V6_7_INST_DIST 0x40000

/* number of umc channel instance with memory map register access */
#define UMC_V6_7_CHANNEL_INSTANCE_NUM 4
/* number of umc instance with memory map register access */
#define UMC_V6_7_UMC_INSTANCE_NUM 8
/* total channel instances in one umc block */
#define UMC_V6_7_TOTAL_CHANNEL_NUM (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
/* UMC register per channel offset */
#define UMC_V6_7_PER_CHANNEL_OFFSET 0x400
extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
extern const uint32_t
umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
extern const uint32_t
umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];

#endif
@@ -119,7 +119,7 @@ static int vcn_v1_0_sw_init(void *handle)
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
DRM_INFO("PSP loading VCN firmware\n");
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}

r = amdgpu_vcn_resume(adev);

@@ -122,7 +122,7 @@ static int vcn_v2_0_sw_init(void *handle)
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
DRM_INFO("PSP loading VCN firmware\n");
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}

r = amdgpu_vcn_resume(adev);

@@ -152,7 +152,7 @@ static int vcn_v2_5_sw_init(void *handle)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
DRM_INFO("PSP loading VCN firmware\n");
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}

r = amdgpu_vcn_resume(adev);

@@ -160,7 +160,7 @@ static int vcn_v3_0_sw_init(void *handle)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
DRM_INFO("PSP loading VCN firmware\n");
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}

r = amdgpu_vcn_resume(adev);
@@ -1404,6 +1404,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
break;
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_CYAN_SKILLFISH:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;

@@ -1989,8 +1990,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->num_hops_xgmi = 1;
if (adev->asic_type == CHIP_ALDEBARAN) {
sub_type_hdr->minimum_bandwidth_mbs =
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
kdev->kgd, NULL, true);
sub_type_hdr->maximum_bandwidth_mbs =
sub_type_hdr->minimum_bandwidth_mbs;
}
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true);
sub_type_hdr->maximum_bandwidth_mbs =
amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false);
}

sub_type_hdr->proximity_domain_from = proximity_domain;

@@ -2033,6 +2045,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->proximity_domain_to = proximity_domain_to;
sub_type_hdr->num_hops_xgmi =
amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
sub_type_hdr->maximum_bandwidth_mbs =
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false);
sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0;

return 0;
}
@@ -84,6 +84,7 @@ static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
[CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
[CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
[CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd,
[CHIP_CYAN_SKILLFISH] = &gfx_v10_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2

@@ -596,6 +597,24 @@ static const struct kfd_device_info yellow_carp_device_info = {
.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info cyan_skillfish_device_info = {
.asic_family = CHIP_CYAN_SKILLFISH,
.asic_name = "cyan_skillfish",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
.ih_ring_entry_size = 8 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_v9,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
};

/* For each entry, [0] is regular and [1] is virtualisation device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2

@@ -625,6 +644,7 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
[CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
[CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
[CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL},
[CHIP_CYAN_SKILLFISH] = {&cyan_skillfish_device_info, NULL},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,

@@ -1369,7 +1389,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
if (kfd && kfd->init_complete)
kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);

@@ -1382,18 +1402,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
*/
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
int r = 0;

if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
pr_err("HWS is not enabled");
return -EINVAL;
}

r = pm_debugfs_hang_hws(&dev->dqm->packets);
if (!r)
r = dqm_debugfs_execute_queues(dev->dqm);

return r;
return dqm_debugfs_hang_hws(dev->dqm);
}

#endif
@@ -260,7 +260,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
struct qcm_process_device *qpd)
{
const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
int ret;

if (!qpd->ib_kaddr)

@@ -1000,7 +1000,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);

if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
return pm_init(&dqm->packets, dqm);
return pm_init(&dqm->packet_mgr, dqm);
dqm->sched_running = true;

return 0;

@@ -1009,7 +1009,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
pm_uninit(&dqm->packets, false);
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;

return 0;

@@ -1124,7 +1124,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
"queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);

return pm_send_set_resources(&dqm->packets, &res);
return pm_send_set_resources(&dqm->packet_mgr, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)

@@ -1164,7 +1164,8 @@ static int start_cpsch(struct device_queue_manager *dqm)

retval = 0;

retval = pm_init(&dqm->packets, dqm);
dqm_lock(dqm);
retval = pm_init(&dqm->packet_mgr, dqm);
if (retval)
goto fail_packet_manager_init;

@@ -1186,7 +1187,6 @@ static int start_cpsch(struct device_queue_manager *dqm)

init_interrupts(dqm);

dqm_lock(dqm);
/* clear hang status when driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
dqm->is_resetting = false;

@@ -1197,8 +1197,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
pm_uninit(&dqm->packets, false);
pm_uninit(&dqm->packet_mgr, false);
fail_packet_manager_init:
dqm_unlock(dqm);
return retval;
}

@@ -1211,12 +1212,12 @@ static int stop_cpsch(struct device_queue_manager *dqm)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
dqm_unlock(dqm);

pm_release_ib(&dqm->packets);
pm_release_ib(&dqm->packet_mgr);

kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets, hanging);
pm_uninit(&dqm->packet_mgr, hanging);
dqm_unlock(dqm);

return 0;
}

@@ -1390,7 +1391,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (dqm->active_runlist)
return 0;

retval = pm_send_runlist(&dqm->packets, &dqm->queues);
retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
pr_debug("%s sent runlist\n", __func__);
if (retval) {
pr_err("failed to execute runlist\n");

@@ -1416,13 +1417,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;

retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
if (retval)
return retval;

*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,

@@ -1448,14 +1449,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
* check those fields
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->read_doorbell_id(dqm->packets.priv_queue->queue->mqd)) {
if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
return -ETIME;
}

pm_release_ib(&dqm->packets);
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;

return retval;

@@ -1946,6 +1947,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:

@@ -2099,11 +2101,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return r;
}

int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
{
int r = 0;

dqm_lock(dqm);
r = pm_debugfs_hang_hws(&dqm->packet_mgr);
if (r) {
dqm_unlock(dqm);
return r;
}
dqm->active_runlist = true;
r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
dqm_unlock(dqm);
@@ -169,7 +169,7 @@ struct device_queue_manager {
struct device_queue_manager_asic_ops asic_ops;

struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
struct packet_manager packet_mgr;
struct kfd_dev *dev;
struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
@@ -422,6 +422,7 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
kfd_init_apertures_v9(pdd, id);
break;
default:
@@ -251,6 +251,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_ALDEBARAN:

@@ -278,6 +279,7 @@ void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
kernel_queue_uninit(pm->priv_queue, hanging);
pm->priv_queue = NULL;
}

int pm_send_set_resources(struct packet_manager *pm,

@@ -447,6 +449,9 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
uint32_t *buffer, size;
int r = 0;

if (!pm->priv_queue)
return -EAGAIN;

size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue,
@@ -1194,7 +1194,7 @@ int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else
@@ -205,23 +205,23 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
}

void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
uint32_t throttle_bitmask)
uint64_t throttle_bitmask)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
/*
* ThermalThrottle msg = throttle_bitmask(8):
* thermal_interrupt_count(16):
* 1 byte event + 1 byte space + 8 byte throttle_bitmask +
* 1 byte event + 1 byte space + 16 byte throttle_bitmask +
* 1 byte : + 16 byte thermal_interrupt_counter + 1 byte \n +
* 1 byte \0 = 29
* 1 byte \0 = 37
*/
char fifo_in[29];
char fifo_in[37];
int len;

if (list_empty(&dev->smi_clients))
return;

len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%llx\n",
len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
atomic64_read(&adev->smu.throttle_int_counter));
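A standalone check of the 37-byte bound worked out in the comment above (not part of the commit): a u64 prints as at most 16 hex digits under %llx, so 1 (event) + 1 (space) + 16 (bitmask) + 1 (':') + 16 (counter) + 1 ('\n') + 1 ('\0') = 37.

#include <stdio.h>

int main(void)
{
	char fifo_in[37];
	/* Worst case: both 64-bit values are all ones; 0x2 stands in
	 * for the one-digit event id the comment assumes.
	 */
	int len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
			   0x2, 0xffffffffffffffffULL, 0xffffffffffffffffULL);

	printf("len = %d of %zu\n", len, sizeof(fifo_in)); /* 36 of 37 */
	return 0;
}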
@@ -26,7 +26,7 @@
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd);
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid);
void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
uint32_t throttle_bitmask);
uint64_t throttle_bitmask);
void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset);

#endif
@@ -1424,6 +1424,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);

@@ -1630,7 +1631,7 @@ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
}

seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);
if (r)
break;
}
@@ -618,6 +618,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif

#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
* @interrupt_params: used for determining the Outbox instance

@@ -625,7 +626,6 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
* Handles the Outbox Interrupt
* event handler.
*/
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
struct dmub_notification notify;

@@ -2412,6 +2412,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
static const u8 pre_computed_values[] = {
50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
int i;

if (!aconnector || !aconnector->dc_link)
return;

@@ -2423,15 +2424,21 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
conn_base = &aconnector->base;
adev = drm_to_adev(conn_base->dev);
dm = &adev->dm;
caps = &dm->backlight_caps;
for (i = 0; i < dm->num_of_edps; i++) {
if (link == dm->backlight_link[i])
break;
}
if (i >= dm->num_of_edps)
return;
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

if (caps->ext_caps->bits.oled == 1 ||
if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
caps->ext_caps->bits.hdr_aux_backlight_control == 1)
caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
caps->aux_support = true;

if (amdgpu_backlight == 0)
@@ -3423,35 +3430,36 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
#if defined(CONFIG_ACPI)
struct amdgpu_dm_backlight_caps caps;

memset(&caps, 0, sizeof(caps));

if (dm->backlight_caps.caps_valid)
if (dm->backlight_caps[bl_idx].caps_valid)
return;

amdgpu_acpi_get_backlight_caps(&caps);
if (caps.caps_valid) {
dm->backlight_caps.caps_valid = true;
dm->backlight_caps[bl_idx].caps_valid = true;
if (caps.aux_support)
return;
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
} else {
dm->backlight_caps.min_input_signal =
dm->backlight_caps[bl_idx].min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal =
dm->backlight_caps[bl_idx].max_input_signal =
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
if (dm->backlight_caps.aux_support)
if (dm->backlight_caps[bl_idx].aux_support)
return;

dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

@@ -3502,41 +3510,31 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
}

static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
struct dc_link *link;
u32 brightness;
bool rc;
int i;

amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = dm->backlight_caps[bl_idx];

for (i = 0; i < dm->num_of_edps; i++) {
dm->brightness[i] = user_brightness;
brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
link[i] = (struct dc_link *)dm->backlight_link[i];
}
dm->brightness[bl_idx] = user_brightness;
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];

/* Change brightness based on AUX property */
if (caps.aux_support) {
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc) {
DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
break;
}
}
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc)
DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
} else {
for (i = 0; i < dm->num_of_edps; i++) {
rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
if (!rc) {
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
break;
}
}
rc = dc_link_set_backlight_level(link, brightness, 0);
if (!rc)
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}

return rc ? 0 : 1;

@@ -3545,33 +3543,41 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
int i;

amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
for (i = 0; i < dm->num_of_edps; i++) {
if (bd == dm->backlight_dev[i])
break;
}
if (i >= AMDGPU_DM_MAX_NUM_EDP)
i = 0;
amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
int bl_idx)
{
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = dm->backlight_caps[bl_idx];

if (caps.aux_support) {
struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
u32 avg, peak;
bool rc;

rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
if (!rc)
return dm->brightness[0];
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
} else {
int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
int ret = dc_link_get_backlight_level(link);

if (ret == DC_ERROR_UNEXPECTED)
return dm->brightness[0];
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, ret);
}
}

@@ -3579,8 +3585,15 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
int i;

return amdgpu_dm_backlight_get_level(dm);
for (i = 0; i < dm->num_of_edps; i++) {
if (bd == dm->backlight_dev[i])
break;
}
if (i >= AMDGPU_DM_MAX_NUM_EDP)
i = 0;
return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {

@@ -3594,31 +3607,28 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
char bl_name[16];
struct backlight_properties props = { 0 };
int i;

amdgpu_dm_update_backlight_caps(dm);
for (i = 0; i < dm->num_of_edps; i++)
dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;

snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
adev_to_drm(dm->adev)->primary->index);
adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

dm->backlight_dev = backlight_device_register(bl_name,
adev_to_drm(dm->adev)->dev,
dm,
&amdgpu_dm_backlight_ops,
&props);
dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
adev_to_drm(dm->adev)->dev,
dm,
&amdgpu_dm_backlight_ops,
&props);

if (IS_ERR(dm->backlight_dev))
if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
DRM_ERROR("DM: Backlight registration failed!\n");
else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -3675,10 +3685,10 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
* DM initialization because not having a backlight control
* is better than a black screen.
*/
if (!dm->backlight_dev)
if (!dm->backlight_dev[dm->num_of_edps])
amdgpu_dm_register_backlight_device(dm);

if (dm->backlight_dev) {
if (dm->backlight_dev[dm->num_of_edps]) {
dm->backlight_link[dm->num_of_edps] = link;
dm->num_of_edps++;
}
@@ -4747,7 +4757,7 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
const bool force_disable_dcc)
{
const uint64_t modifier = afb->base.modifier;
int ret;
int ret = 0;

fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

@@ -4765,9 +4775,9 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,

ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
if (ret)
return ret;
drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

return 0;
return ret;
}

static int
@@ -6198,6 +6208,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
const struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_display_manager *dm = &adev->dm;
int i;

/*
* Call only if mst_mgr was initialized before since it's not done

@@ -6208,12 +6219,11 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none &&
dm->backlight_dev) {
backlight_device_unregister(dm->backlight_dev);
dm->backlight_dev = NULL;
for (i = 0; i < dm->num_of_edps; i++) {
if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
backlight_device_unregister(dm->backlight_dev[i]);
dm->backlight_dev[i] = NULL;
}
}
#endif
@@ -7570,8 +7580,10 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
* 60 - Commonly used
* 48,72,96 - Multiples of 24
*/
const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000 };
static const uint32_t common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000
};

/*
* Find mode with highest refresh rate with the same resolution

@@ -9191,8 +9203,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
(amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif
/*
* send vblank event on all events not handled in flip and
@@ -10549,13 +10564,68 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
return capable;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
uint8_t *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
bool res;
union dmub_rb_cmd cmd;
struct dmub_cmd_send_edid_cea *input;
struct dmub_cmd_edid_cea_output *output;

if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
return false;

memset(&cmd, 0, sizeof(cmd));

input = &cmd.edid_cea.data.input;

cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
cmd.edid_cea.header.sub_type = 0;
cmd.edid_cea.header.payload_bytes =
sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
input->offset = offset;
input->length = length;
input->total_length = total_length;
memcpy(input->payload, data, length);

res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
if (!res) {
DRM_ERROR("EDID CEA parser failed\n");
return false;
}

output = &cmd.edid_cea.data.output;

if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
DRM_ERROR("EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
if (!output->amd_vsdb.vsdb_found)
return false;

vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
DRM_WARN("Unknown EDID CEA parser results\n");
return false;
}

return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
uint8_t *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
struct dc *dc = adev->dm.dc;

/* send extension block to DMCU for parsing */
for (i = 0; i < len; i += 8) {

@@ -10563,14 +10633,14 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
int offset;

/* send 8 bytes a time */
if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
return false;

if (i+8 == len) {
/* EDID block sent completed, expect result */
int version, min_rate, max_rate;

res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
if (res) {
/* amd vsdb found */
vsdb_info->freesync_supported = 1;

@@ -10584,7 +10654,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
}

/* check for ack*/
res = dc_edid_parser_recv_cea_ack(dc, &offset);
res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
if (!res)
return false;
}

@@ -10592,6 +10662,34 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
uint8_t *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;

/* send extension block to DMCU for parsing */
for (i = 0; i < len; i += 8) {
/* send 8 bytes a time */
if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
return false;
}

return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
uint8_t *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

if (adev->dm.dmub_srv)
return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
|
||||
else
|
||||
return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
|
||||
}
|
||||
|
||||
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
|
||||
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
|
||||
{
|
||||
|
|
|
@@ -365,13 +365,13 @@ struct amdgpu_display_manager {
 
 	spinlock_t irq_handler_list_table_lock;
 
-	struct backlight_device *backlight_dev;
+	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];
 
 	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];
 
 	uint8_t num_of_edps;
 
-	struct amdgpu_dm_backlight_caps backlight_caps;
+	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];
 
 	struct mod_freesync *freesync_module;
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -197,29 +197,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
 
 	rd_buf_ptr = rd_buf;
 
-	str_len = strlen("Current: %d %d %d ");
-	snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
+	str_len = strlen("Current: %d 0x%x %d ");
+	snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
 			link->cur_link_settings.lane_count,
 			link->cur_link_settings.link_rate,
 			link->cur_link_settings.link_spread);
 	rd_buf_ptr += str_len;
 
-	str_len = strlen("Verified: %d %d %d ");
-	snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
+	str_len = strlen("Verified: %d 0x%x %d ");
+	snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
 			link->verified_link_cap.lane_count,
 			link->verified_link_cap.link_rate,
 			link->verified_link_cap.link_spread);
 	rd_buf_ptr += str_len;
 
-	str_len = strlen("Reported: %d %d %d ");
-	snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
+	str_len = strlen("Reported: %d 0x%x %d ");
+	snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
 			link->reported_link_cap.lane_count,
 			link->reported_link_cap.link_rate,
 			link->reported_link_cap.link_spread);
 	rd_buf_ptr += str_len;
 
-	str_len = strlen("Preferred: %d %d %d ");
-	snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
+	str_len = strlen("Preferred: %d 0x%x %d ");
+	snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
 			link->preferred_link_setting.lane_count,
 			link->preferred_link_setting.link_rate,
 			link->preferred_link_setting.link_spread);

@@ -377,7 +377,7 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
 	if (!rd_buf)
 		return -EINVAL;
 
-	snprintf(rd_buf, rd_buf_size, " %d %d %d ",
+	snprintf(rd_buf, rd_buf_size, " %d %d %d\n",
 			link->cur_lane_setting.VOLTAGE_SWING,
 			link->cur_lane_setting.PRE_EMPHASIS,
 			link->cur_lane_setting.POST_CURSOR2);
@@ -655,10 +655,8 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
 		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
 
 		hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
-		if (dc->ctx->dce_version == DCN_VERSION_3_1) {
+		if (dc->ctx->dce_version == DCN_VERSION_3_1)
 			hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
-			hdcp_work[i].hdcp.config.psp.caps.opm_state_query_supported = false;
-		}
 		hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
 		hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
 		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -264,9 +264,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (pp_smu->set_hard_min_fclk_by_freq &&
 				pp_smu->set_hard_min_dcfclk_by_freq &&
 				pp_smu->set_min_deep_sleep_dcfclk) {
-			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
-			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
-			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
 		}
 	}

@@ -284,9 +284,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (pp_smu->set_hard_min_fclk_by_freq &&
 				pp_smu->set_hard_min_dcfclk_by_freq &&
 				pp_smu->set_min_deep_sleep_dcfclk) {
-			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
-			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
-			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->fclk_khz));
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_khz));
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(new_clocks->dcfclk_deep_sleep_khz));
 		}
 	}
 }
@@ -130,7 +130,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
 	actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDispclkFreq,
-			requested_dispclk_khz / 1000);
+			khz_to_mhz_ceil(requested_dispclk_khz));
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {

@@ -150,7 +150,7 @@ int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 	actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDprefclkFreq,
-			clk_mgr->base.dprefclk_khz / 1000);
+			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
 
 	/* TODO: add code for programing DP DTO, currently this is down by command table */
 
@@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
 
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-//	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
 	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);

@@ -253,20 +253,20 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
 		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
 		if (pp_smu && pp_smu->set_hard_min_dcfclk_by_freq)
-			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dcfclk_khz / 1000);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
 	}
 
 	if (should_set_clock(safe_to_lower,
 			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
 		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
 		if (pp_smu && pp_smu->set_min_deep_sleep_dcfclk)
-			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz)) {
 		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
 		if (pp_smu && pp_smu->set_hard_min_socclk_by_freq)
-			pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
+			pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.socclk_khz));
 	}
 
 	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);

@@ -281,7 +281,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
 		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
 		if (pp_smu && pp_smu->set_hard_min_uclk_by_freq)
-			pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
+			pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {

@@ -306,7 +306,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 
 		clk_mgr_base->clks.disp_dpp_voltage_level_khz = new_clocks->disp_dpp_voltage_level_khz;
 		if (pp_smu && pp_smu->set_voltage_by_freq)
-			pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.disp_dpp_voltage_level_khz / 1000);
+			pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.disp_dpp_voltage_level_khz));
 	}
 
 	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {

@@ -502,7 +502,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc
 
 	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
 		clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
-		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
+		pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
 	}
 }
@@ -126,7 +126,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
 	actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDispclkFreq,
-			requested_dispclk_khz / 1000);
+			khz_to_mhz_ceil(requested_dispclk_khz));
 
 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
 		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {

@@ -138,7 +138,7 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
 
 	// pmfw always set clock more than or equal requested clock
 	if (!IS_DIAG_DC(dc->ctx->dce_environment))
-		ASSERT(actual_dispclk_set_mhz >= requested_dispclk_khz / 1000);
+		ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz));
 
 	return actual_dispclk_set_mhz * 1000;
 }

@@ -150,7 +150,7 @@ int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 	actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDprefclkFreq,
-			clk_mgr->base.dprefclk_khz / 1000);
+			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
 
 	/* TODO: add code for programing DP DTO, currently this is down by command table */
 

@@ -167,7 +167,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
 	actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
-			requested_dcfclk_khz / 1000);
+			khz_to_mhz_ceil(requested_dcfclk_khz));
 
 	return actual_dcfclk_set_mhz * 1000;
 }

@@ -182,7 +182,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
 	actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
-			requested_min_ds_dcfclk_khz / 1000);
+			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
 
 	return actual_min_ds_dcfclk_mhz * 1000;
 }

@@ -192,7 +192,7 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy
 	rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetPhyclkVoltageByFreq,
-			requested_phyclk_khz / 1000);
+			khz_to_mhz_ceil(requested_phyclk_khz));
 }
 
 int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)

@@ -203,10 +203,10 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
 	actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDppclkFreq,
-			requested_dpp_khz / 1000);
+			khz_to_mhz_ceil(requested_dpp_khz));
 
 	if (!IS_DIAG_DC(dc->ctx->dce_environment))
-		ASSERT(actual_dppclk_set_mhz >= requested_dpp_khz / 1000);
+		ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz));
 
 	return actual_dppclk_set_mhz * 1000;
 }
@@ -284,12 +284,12 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
 
 	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
 		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, clk_mgr_base->clks.dcfclk_khz / 1000);
+		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
 		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
-		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000);
+		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))

@@ -317,20 +317,20 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
 	/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
 	if (clk_mgr_base->clks.p_state_change_support &&
 			(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->clks.dramclk_khz / 1000);
+		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
 
 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
 		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
 			dpp_clock_lowered = true;
 
 		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, clk_mgr_base->clks.dppclk_khz / 1000);
+		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
 		update_dppclk = true;
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
 		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
+		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
 		update_dispclk = true;
 	}
 

@@ -396,12 +396,17 @@ static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_
 	if (!clk_mgr->smu_present)
 		return;
 
-	if (current_mode)
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-				clk_mgr_base->clks.dramclk_khz / 1000);
-	else
+	if (current_mode) {
+		if (clk_mgr_base->clks.p_state_change_support)
+			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+		else
+			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+	} else {
 		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
 				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+	}
 }
 
 /* Set max memclk to highest DPM value */

@@ -489,7 +494,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d
 
 	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
 		clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
-		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
+		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
 	}
 }
@@ -133,7 +133,7 @@ int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispc
 	actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDispclkFreq,
-			requested_dispclk_khz / 1000);
+			khz_to_mhz_ceil(requested_dispclk_khz));
 
 	return actual_dispclk_set_mhz * 1000;
 }

@@ -147,7 +147,7 @@ int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 	actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDprefclkFreq,
-			clk_mgr->base.dprefclk_khz / 1000);
+			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
 
 	/* TODO: add code for programing DP DTO, currently this is down by command table */
 

@@ -163,7 +163,7 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
 	actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
-			requested_dcfclk_khz / 1000);
+			khz_to_mhz_ceil(requested_dcfclk_khz));
 
 	return actual_dcfclk_set_mhz * 1000;
 }

@@ -177,7 +177,7 @@ int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int r
 	actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
-			requested_min_ds_dcfclk_khz / 1000);
+			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
 
 	return actual_min_ds_dcfclk_mhz * 1000;
 }

@@ -191,7 +191,7 @@ int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_kh
 	actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDppclkFreq,
-			requested_dpp_khz / 1000);
+			khz_to_mhz_ceil(requested_dpp_khz));
 
 	return actual_dppclk_set_mhz * 1000;
 }
@@ -114,7 +114,7 @@ void vg_update_clocks(struct clk_mgr *clk_mgr_base,
 
 	display_count = vg_get_active_display_cnt_wa(dc, context);
 	/* if we can go lower, go lower */
-	if (display_count == 0) {
+	if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) {
 		union display_idle_optimization_u idle_info = { 0 };
 
 		idle_info.idle_info.df_request_disabled = 1;
@@ -147,7 +147,7 @@ int dcn31_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispcl
 	actual_dispclk_set_mhz = dcn31_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDispclkFreq,
-			(requested_dispclk_khz + 999) / 1000);
+			khz_to_mhz_ceil(requested_dispclk_khz));
 
 	return actual_dispclk_set_mhz * 1000;
 }

@@ -162,7 +162,7 @@ int dcn31_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 	actual_dprefclk_set_mhz = dcn31_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDprefclkFreq,
-			(clk_mgr->base.dprefclk_khz + 999) / 1000);
+			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
 
 	/* TODO: add code for programing DP DTO, currently this is down by command table */
 

@@ -182,7 +182,7 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
 	actual_dcfclk_set_mhz = dcn31_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
-			(requested_dcfclk_khz + 999) / 1000);
+			khz_to_mhz_ceil(requested_dcfclk_khz));
 
 	return actual_dcfclk_set_mhz * 1000;
 }

@@ -200,7 +200,7 @@ int dcn31_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int re
 	actual_min_ds_dcfclk_mhz = dcn31_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
-			(requested_min_ds_dcfclk_khz + 999) / 1000);
+			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));
 
 	return actual_min_ds_dcfclk_mhz * 1000;
 }

@@ -215,7 +215,7 @@ int dcn31_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz
 	actual_dppclk_set_mhz = dcn31_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_SetDppclkFreq,
-			(requested_dpp_khz + 999) / 1000);
+			khz_to_mhz_ceil(requested_dpp_khz));
 
 	return actual_dppclk_set_mhz * 1000;
 }
@@ -541,6 +541,7 @@ static void link_disconnect_sink(struct dc_link *link)
 	}
 
 	link->dpcd_sink_count = 0;
+	//link->dpcd_caps.dpcd_rev.raw = 0;
 }
 
 static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)

@@ -742,6 +743,7 @@ static bool detect_dp(struct dc_link *link,
 				sink_caps,
 				audio_support);
 		link->dpcd_caps.dongle_type = sink_caps->dongle_type;
+		link->dpcd_caps.dpcd_rev.raw = 0;
 	}
 
 	return true;

@@ -1663,6 +1665,12 @@ struct dc_link *link_create(const struct link_init_data *init_params)
 	if (false == dc_link_construct(link, init_params))
 		goto construct_fail;
 
+	/*
+	 * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
+	 * since struct preferred_link_setting won't be reset after S3.
+	 */
+	link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
+
 	return link;
 
 construct_fail:

@@ -3509,61 +3517,6 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 	}
 }
 
-uint32_t dc_bandwidth_in_kbps_from_timing(
-	const struct dc_crtc_timing *timing)
-{
-	uint32_t bits_per_channel = 0;
-	uint32_t kbps;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-	if (timing->flags.DSC)
-		return dc_dsc_stream_bandwidth_in_kbps(timing,
-				timing->dsc_cfg.bits_per_pixel,
-				timing->dsc_cfg.num_slices_h,
-				timing->dsc_cfg.is_dp);
-#endif
-
-	switch (timing->display_color_depth) {
-	case COLOR_DEPTH_666:
-		bits_per_channel = 6;
-		break;
-	case COLOR_DEPTH_888:
-		bits_per_channel = 8;
-		break;
-	case COLOR_DEPTH_101010:
-		bits_per_channel = 10;
-		break;
-	case COLOR_DEPTH_121212:
-		bits_per_channel = 12;
-		break;
-	case COLOR_DEPTH_141414:
-		bits_per_channel = 14;
-		break;
-	case COLOR_DEPTH_161616:
-		bits_per_channel = 16;
-		break;
-	default:
-		ASSERT(bits_per_channel != 0);
-		bits_per_channel = 8;
-		break;
-	}
-
-	kbps = timing->pix_clk_100hz / 10;
-	kbps *= bits_per_channel;
-
-	if (timing->flags.Y_ONLY != 1) {
-		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
-		kbps *= 3;
-		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-			kbps /= 2;
-		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
-			kbps = kbps * 2 / 3;
-	}
-
-	return kbps;
-
-}
-
 void dc_link_set_drive_settings(struct dc *dc,
 		struct link_training_settings *lt_settings,
 		const struct dc_link *link)

@@ -3769,3 +3722,58 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
 
 	return ret;
 }
+
+uint32_t dc_bandwidth_in_kbps_from_timing(
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t bits_per_channel = 0;
+	uint32_t kbps;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (timing->flags.DSC)
+		return dc_dsc_stream_bandwidth_in_kbps(timing,
+				timing->dsc_cfg.bits_per_pixel,
+				timing->dsc_cfg.num_slices_h,
+				timing->dsc_cfg.is_dp);
+#endif
+
+	switch (timing->display_color_depth) {
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bits_per_channel = 12;
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		ASSERT(bits_per_channel != 0);
+		bits_per_channel = 8;
+		break;
+	}
+
+	kbps = timing->pix_clk_100hz / 10;
+	kbps *= bits_per_channel;
+
+	if (timing->flags.Y_ONLY != 1) {
+		/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
+		kbps *= 3;
+		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			kbps /= 2;
+		else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+			kbps = kbps * 2 / 3;
+	}
+
+	return kbps;
+
+}
@@ -1241,29 +1241,15 @@ enum link_training_result dp_check_link_loss_status(
 static inline void decide_8b_10b_training_settings(
 	 struct dc_link *link,
 	 const struct dc_link_settings *link_setting,
-	 const struct dc_link_training_overrides *overrides,
 	 struct link_training_settings *lt_settings)
 {
-	uint32_t lane;
-
 	memset(lt_settings, '\0', sizeof(struct link_training_settings));
 
 	/* Initialize link settings */
 	lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
 	lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
-
-	if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
-		lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
-	else
-		lt_settings->link_settings.link_rate = link_setting->link_rate;
-
-	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
-		lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-	else
-		lt_settings->link_settings.lane_count = link_setting->lane_count;
-
-	/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
-
+	lt_settings->link_settings.link_rate = link_setting->link_rate;
+	lt_settings->link_settings.lane_count = link_setting->lane_count;
 	/* TODO hard coded to SS for now
	 * lt_settings.link_settings.link_spread =
	 * dal_display_path_is_ss_supported(

@@ -1271,30 +1257,52 @@ static inline void decide_8b_10b_training_settings(
 	 *		LINK_SPREAD_05_DOWNSPREAD_30KHZ :
 	 *		LINK_SPREAD_DISABLED;
 	 */
-	/* Initialize link spread */
-	if (link->dp_ss_off)
-		lt_settings->link_settings.link_spread = LINK_SPREAD_DISABLED;
-	else if (overrides->downspread != NULL)
-		lt_settings->link_settings.link_spread
-			= *overrides->downspread
-			? LINK_SPREAD_05_DOWNSPREAD_30KHZ
-			: LINK_SPREAD_DISABLED;
-	else
-		lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
-
-	/* Initialize lane settings overrides */
+	lt_settings->link_settings.link_spread = link->dp_ss_off ?
+			LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+	lt_settings->lttpr_mode = link->lttpr_mode;
+	lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
+	lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+	lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
+	lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+	lt_settings->enhanced_framing = 1;
+	lt_settings->should_set_fec_ready = true;
+}
+
+void dp_decide_training_settings(
+		struct dc_link *link,
+		const struct dc_link_settings *link_settings,
+		struct link_training_settings *lt_settings)
+{
+	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+		decide_8b_10b_training_settings(link, link_settings, lt_settings);
+}
+
+static void override_training_settings(
+		struct dc_link *link,
+		const struct dc_link_training_overrides *overrides,
+		struct link_training_settings *lt_settings)
+{
+	uint32_t lane;
+
+	/* Override link settings */
+	if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+		lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
+	if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
+		lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
+
+	/* Override link spread */
+	if (!link->dp_ss_off && overrides->downspread != NULL)
+		lt_settings->link_settings.link_spread = *overrides->downspread ?
+				LINK_SPREAD_05_DOWNSPREAD_30KHZ
+				: LINK_SPREAD_DISABLED;
+
+	/* Override lane settings */
 	if (overrides->voltage_swing != NULL)
 		lt_settings->voltage_swing = overrides->voltage_swing;
 
 	if (overrides->pre_emphasis != NULL)
 		lt_settings->pre_emphasis = overrides->pre_emphasis;
 
 	if (overrides->post_cursor2 != NULL)
 		lt_settings->post_cursor2 = overrides->post_cursor2;
 
 	/* Initialize lane settings (VS/PE/PC2) */
 	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
 		lt_settings->lane_settings[lane].VOLTAGE_SWING =
 			lt_settings->voltage_swing != NULL ?

@@ -1313,45 +1321,22 @@ static inline void decide_8b_10b_training_settings(
 	/* Initialize training timings */
 	if (overrides->cr_pattern_time != NULL)
 		lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
-	else
-		lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
 
 	if (overrides->eq_pattern_time != NULL)
 		lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
-	else
-		lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
 
 	if (overrides->pattern_for_cr != NULL)
 		lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
-	else
-		lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
 	if (overrides->pattern_for_eq != NULL)
 		lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
-	else
-		lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
 
 	if (overrides->enhanced_framing != NULL)
 		lt_settings->enhanced_framing = *overrides->enhanced_framing;
-	else
-		lt_settings->enhanced_framing = 1;
 
 	if (link->preferred_training_settings.fec_enable != NULL)
 		lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
-	else
-		lt_settings->should_set_fec_ready = true;
 }
 
-void dp_decide_training_settings(
-	struct dc_link *link,
-	const struct dc_link_settings *link_settings,
-	const struct dc_link_training_overrides *overrides,
-	struct link_training_settings *lt_settings)
-{
-	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
-		decide_8b_10b_training_settings(link, link_settings, overrides, lt_settings);
-}
-
 
 uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
 {
 	switch (lttpr_repeater_count) {

@@ -1581,6 +1566,9 @@ bool dc_link_dp_perform_link_training_skip_aux(
 	dp_decide_training_settings(
 			link,
 			link_setting,
-			&link->preferred_training_settings,
 			&lt_settings);
+	override_training_settings(
+			link,
+			&link->preferred_training_settings,
+			&lt_settings);
 

@@ -1727,6 +1715,9 @@ enum link_training_result dc_link_dp_perform_link_training(
 	dp_decide_training_settings(
 			link,
 			link_settings,
-			&link->preferred_training_settings,
 			&lt_settings);
+	override_training_settings(
+			link,
+			&link->preferred_training_settings,
+			&lt_settings);
 

@@ -1939,11 +1930,13 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	bool fec_enable = false;
 
 	dp_decide_training_settings(
-		link,
-		link_settings,
-		lt_overrides,
-		&lt_settings);
+		link,
+		link_settings,
+		&lt_settings);
+	override_training_settings(
+		link,
+		lt_overrides,
+		&lt_settings);
 	/* Setup MST Mode */
 	if (lt_overrides->mst_enable)
 		set_dp_mst_mode(link, *lt_overrides->mst_enable);

@@ -4793,10 +4786,18 @@ void dpcd_set_source_specific_data(struct dc_link *link)
 
 		uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
 
-		result_write_min_hblank = core_link_write_dpcd(link,
-			DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
-			sizeof(hblank_size));
+		if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
+			result_write_min_hblank = core_link_write_dpcd(link,
+				DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+				sizeof(hblank_size));
+
+			if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
+				link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
+		} else {
+			DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
+		}
 	}
 
 	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
 			WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
 			"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
 #include <inc/core_status.h>
 #include <dc_link.h>
 #include <inc/link_hwss.h>
@@ -62,3 +62,27 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
 	status = dmub_srv_stat_get_notification(dmub, notify);
 	ASSERT(status == DMUB_STATUS_OK);
 }
+
+/**
+ *****************************************************************************
+ *  Function: dc_stat_get_dmub_dataout
+ *
+ *  @brief
+ *		Calls dmub layer to retrieve dmub gpint dataout
+ *
+ *  @param
+ *		[in] dc: dc structure
+ *		[in] dataout: dmub gpint dataout
+ *
+ *  @return
+ *		None
+ *****************************************************************************
+ */
+void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout)
+{
+	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;
+	enum dmub_status status;
+
+	status = dmub_srv_get_gpint_dataout(dmub, dataout);
+	ASSERT(status == DMUB_STATUS_OK);
+}
@@ -221,6 +221,9 @@ struct dc_stream_status *dc_stream_get_status_from_state(
 {
 	uint8_t i;
 
+	if (state == NULL)
+		return NULL;
+
 	for (i = 0; i < state->stream_count; i++) {
 		if (stream == state->streams[i])
 			return &state->stream_status[i];
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.141"
+#define DC_VER "3.2.146"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6

@@ -458,7 +458,65 @@ union mem_low_power_enable_options {
 	uint32_t u32All;
 };
 
+struct dc_debug_data {
+	uint32_t ltFailCount;
+	uint32_t i2cErrorCount;
+	uint32_t auxErrorCount;
+};
+
+struct dc_phy_addr_space_config {
+	struct {
+		uint64_t start_addr;
+		uint64_t end_addr;
+		uint64_t fb_top;
+		uint64_t fb_offset;
+		uint64_t fb_base;
+		uint64_t agp_top;
+		uint64_t agp_bot;
+		uint64_t agp_base;
+	} system_aperture;
+
+	struct {
+		uint64_t page_table_start_addr;
+		uint64_t page_table_end_addr;
+		uint64_t page_table_base_addr;
+		bool base_addr_is_mc_addr;
+	} gart_config;
+
+	bool valid;
+	bool is_hvm_enabled;
+	uint64_t page_table_default_page_addr;
+};
+
+struct dc_virtual_addr_space_config {
+	uint64_t page_table_base_addr;
+	uint64_t page_table_start_addr;
+	uint64_t page_table_end_addr;
+	uint32_t page_table_block_size_in_bytes;
+	uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  0 = invalid
+};
+
+struct dc_bounding_box_overrides {
+	int sr_exit_time_ns;
+	int sr_enter_plus_exit_time_ns;
+	int urgent_latency_ns;
+	int percent_of_ideal_drambw;
+	int dram_clock_change_latency_ns;
+	int dummy_clock_change_latency_ns;
+	/* This forces a hard min on the DCFCLK we use
+	 * for DML.  Unlike the debug option for forcing
+	 * DCFCLK, this override affects watermark calculations
+	 */
+	int min_dcfclk_mhz;
+};
+
+struct dc_state;
+struct resource_pool;
+struct dce_hwseq;
+
 struct dc_debug_options {
+	bool native422_support;
+	bool disable_dsc;
 	enum visual_confirm visual_confirm;
 	bool sanity_checks;
 	bool max_disp_clk;

@@ -484,7 +542,6 @@ struct dc_debug_options {
 	bool disable_dsc_power_gate;
 	int dsc_min_slice_height_override;
 	int dsc_bpp_increment_div;
-	bool native422_support;
 	bool disable_pplib_wm_range;
 	enum wm_report_mode pplib_wm_report_mode;
 	unsigned int min_disp_clk_khz;

@@ -554,7 +611,6 @@ struct dc_debug_options {
 	bool validate_dml_output;
 	bool enable_dmcub_surface_flip;
 	bool usbc_combo_phy_reset_wa;
-	bool disable_dsc;
 	bool enable_dram_clock_change_one_display_vactive;
 	union mem_low_power_enable_options enable_mem_low_power;
 	bool force_vblank_alignment;

@@ -572,69 +628,13 @@ struct dc_debug_options {
 #endif
 };
 
-struct dc_debug_data {
-	uint32_t ltFailCount;
-	uint32_t i2cErrorCount;
-	uint32_t auxErrorCount;
-};
-
-struct dc_phy_addr_space_config {
-	struct {
-		uint64_t start_addr;
-		uint64_t end_addr;
-		uint64_t fb_top;
-		uint64_t fb_offset;
-		uint64_t fb_base;
-		uint64_t agp_top;
-		uint64_t agp_bot;
-		uint64_t agp_base;
-	} system_aperture;
-
-	struct {
-		uint64_t page_table_start_addr;
-		uint64_t page_table_end_addr;
-		uint64_t page_table_base_addr;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-		bool base_addr_is_mc_addr;
-#endif
-	} gart_config;
-
-	bool valid;
-	bool is_hvm_enabled;
-	uint64_t page_table_default_page_addr;
-};
-
-struct dc_virtual_addr_space_config {
-	uint64_t page_table_base_addr;
-	uint64_t page_table_start_addr;
-	uint64_t page_table_end_addr;
-	uint32_t page_table_block_size_in_bytes;
-	uint8_t page_table_depth; // 1 = 1 level, 2 = 2 level, etc.  0 = invalid
-};
-
-struct dc_bounding_box_overrides {
-	int sr_exit_time_ns;
-	int sr_enter_plus_exit_time_ns;
-	int urgent_latency_ns;
-	int percent_of_ideal_drambw;
-	int dram_clock_change_latency_ns;
-	int dummy_clock_change_latency_ns;
-	/* This forces a hard min on the DCFCLK we use
-	 * for DML.  Unlike the debug option for forcing
-	 * DCFCLK, this override affects watermark calculations
-	 */
-	int min_dcfclk_mhz;
-};
-
 struct resource_pool;
 struct dce_hwseq;
 struct gpu_info_soc_bounding_box_v1_0;
 struct dc {
+	struct dc_debug_options debug;
 	struct dc_versions versions;
 	struct dc_caps caps;
 	struct dc_cap_funcs cap_funcs;
 	struct dc_config config;
-	struct dc_debug_options debug;
 	struct dc_bounding_box_overrides bb_overrides;
 	struct dc_bug_wa work_arounds;
 	struct dc_context *ctx;
@@ -109,6 +109,7 @@ struct dc_link_settings {
 	enum dc_link_spread link_spread;
 	bool use_link_rate_set;
 	uint8_t link_rate_set;
+	bool dpcd_source_device_specific_field_support;
 };
 
 struct dc_lane_settings {
@@ -38,5 +38,6 @@
 #include "dmub/dmub_srv.h"
 
 void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification *notify);
+void dc_stat_get_dmub_dataout(const struct dc *dc, uint32_t *dataout);
 
 #endif /* _DC_STAT_H_ */
@@ -75,18 +75,6 @@ enum dce_environment {
 #define IS_DIAG_DC(dce_environment) \
 	(IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
 
-struct hw_asic_id {
-	uint32_t chip_id;
-	uint32_t chip_family;
-	uint32_t pci_revision_id;
-	uint32_t hw_internal_rev;
-	uint32_t vram_type;
-	uint32_t vram_width;
-	uint32_t feature_flags;
-	uint32_t fake_paths_num;
-	void *atombios_base_address;
-};
-
 struct dc_perf_trace {
 	unsigned long read_count;
 	unsigned long write_count;

@@ -94,36 +82,7 @@ struct dc_perf_trace {
 	unsigned long last_entry_write;
 };
 
-struct dc_context {
-	struct dc *dc;
-
-	void *driver_context; /* e.g. amdgpu_device */
-	struct dc_perf_trace *perf_trace;
-	void *cgs_device;
-
-	enum dce_environment dce_environment;
-	struct hw_asic_id asic_id;
-
-	/* todo: below should probably move to dc.  to facilitate removal
-	 * of AS we will store these here
-	 */
-	enum dce_version dce_version;
-	struct dc_bios *dc_bios;
-	bool created_bios;
-	struct gpio_service *gpio_service;
-	uint32_t dc_sink_id_count;
-	uint32_t dc_stream_id_count;
-	uint32_t dc_edp_id_count;
-	uint64_t fbc_gpu_addr;
-	struct dc_dmub_srv *dmub_srv;
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-	struct cp_psp cp_psp;
-#endif
-};
-
-
-#define DC_MAX_EDID_BUFFER_SIZE 1280
+#define DC_MAX_EDID_BUFFER_SIZE 2048
 #define DC_EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10

@@ -836,6 +795,46 @@ struct dc_clock_config {
 	uint32_t current_clock_khz;/*current clock in use*/
 };
 
+struct hw_asic_id {
+	uint32_t chip_id;
+	uint32_t chip_family;
+	uint32_t pci_revision_id;
+	uint32_t hw_internal_rev;
+	uint32_t vram_type;
+	uint32_t vram_width;
+	uint32_t feature_flags;
+	uint32_t fake_paths_num;
+	void *atombios_base_address;
+};
+
+struct dc_context {
+	struct dc *dc;
+
+	void *driver_context; /* e.g. amdgpu_device */
+	struct dc_perf_trace *perf_trace;
+	void *cgs_device;
+
+	enum dce_environment dce_environment;
+	struct hw_asic_id asic_id;
+
+	/* todo: below should probably move to dc.  to facilitate removal
+	 * of AS we will store these here
+	 */
+	enum dce_version dce_version;
+	struct dc_bios *dc_bios;
+	bool created_bios;
+	struct gpio_service *gpio_service;
+	uint32_t dc_sink_id_count;
+	uint32_t dc_stream_id_count;
+	uint32_t dc_edp_id_count;
+	uint64_t fbc_gpu_addr;
+	struct dc_dmub_srv *dmub_srv;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+	struct cp_psp cp_psp;
+#endif
+
+};
+
 /* DSC DPCD capabilities */
 union dsc_slice_caps1 {
 	struct {
@@ -71,6 +71,8 @@ enum {
 #define DEFAULT_AUX_ENGINE_MULT   0
 #define DEFAULT_AUX_ENGINE_LENGTH 69
 
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
 static void release_engine(
 	struct dce_aux *engine)
 {

@@ -743,5 +745,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 fail:
 	if (!payload_reply)
 		payload->reply = NULL;
+
+	DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+				WPP_BIT_FLAG_DC_ERROR,
+				"AUX transaction failed. Result: %d",
+				operation_result);
+
 	return false;
 }
@@ -29,6 +29,8 @@
 #include "dmub/dmub_srv.h"
 #include "core_types.h"
 
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
 #define MAX_PIPES 6
 
 /*

@@ -96,10 +98,19 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state,
 			// Return invalid state when GPINT times out
 			*state = PSR_STATE_INVALID;
 
-		// Assert if max retry hit
-		if (retry_count >= 1000)
-			ASSERT(0);
 	} while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
+
+	// Assert if max retry hit
+	if (retry_count >= 1000 && *state == PSR_STATE_INVALID) {
+		ASSERT(0);
+		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+				WPP_BIT_FLAG_Firmware_PsrState,
+				"Unable to get PSR state from FW.");
+	} else
+		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_VERBOSE,
+				WPP_BIT_FLAG_Firmware_PsrState,
+				"Got PSR state from FW. PSR state: %d, Retry count: %d",
+				*state, retry_count);
 }
 
 /*

@@ -207,7 +218,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
 	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
 	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
 	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
-	cmd.psr_set_level.psr_set_level_data.cmd_version = PSR_VERSION_1;
+	cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
 	cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
 	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
 	dc_dmub_srv_cmd_execute(dc->dmub_srv);

@@ -293,7 +304,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 	copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
 	copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
 	copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
-	copy_settings_data->cmd_version = PSR_VERSION_1;
+	copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
 	copy_settings_data->panel_inst = panel_inst;
 
 	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
@@ -121,6 +121,10 @@ struct dcn_hubbub_registers {
 	uint32_t DCN_VM_AGP_BASE;
 	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
 	uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
+	uint32_t DCN_VM_FAULT_ADDR_MSB;
+	uint32_t DCN_VM_FAULT_ADDR_LSB;
+	uint32_t DCN_VM_FAULT_CNTL;
+	uint32_t DCN_VM_FAULT_STATUS;
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
 	uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;

@@ -233,7 +237,19 @@ struct dcn_hubbub_registers {
 		type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
 		type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\
 		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\
-		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB
+		type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
+		type DCN_VM_FAULT_ADDR_MSB;\
+		type DCN_VM_FAULT_ADDR_LSB;\
+		type DCN_VM_ERROR_STATUS_CLEAR;\
+		type DCN_VM_ERROR_STATUS_MODE;\
+		type DCN_VM_ERROR_INTERRUPT_ENABLE;\
+		type DCN_VM_RANGE_FAULT_DISABLE;\
+		type DCN_VM_PRQ_FAULT_DISABLE;\
+		type DCN_VM_ERROR_STATUS;\
+		type DCN_VM_ERROR_VMID;\
+		type DCN_VM_ERROR_TABLE_LEVEL;\
+		type DCN_VM_ERROR_PIPE;\
+		type DCN_VM_ERROR_INTERRUPT_STATUS
 
 #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
 		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\

@@ -303,6 +319,7 @@ struct dcn_hubbub_registers {
 		type DET3_SIZE_CURRENT;\
 		type COMPBUF_SIZE;\
 		type COMPBUF_SIZE_CURRENT;\
+		type CONFIG_ERROR;\
 		type COMPBUF_RESERVED_SPACE_64B;\
 		type COMPBUF_RESERVED_SPACE_ZS;\
 		type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;\
@@ -871,6 +871,8 @@ void hubp1_read_state_common(struct hubp *hubp)
 	struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
 	struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
 	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+	uint32_t aperture_low_msb, aperture_low_lsb;
+	uint32_t aperture_high_msb, aperture_high_lsb;
 
 	/* Requester */
 	REG_GET(HUBPRET_CONTROL,

@@ -881,6 +883,22 @@ void hubp1_read_state_common(struct hubp *hubp)
 			MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
 			CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
 
+	REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB,
+			MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, &aperture_low_msb);
+
+	REG_GET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB,
+			MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, &aperture_low_lsb);
+
+	REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB,
+			MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, &aperture_high_msb);
+
+	REG_GET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB,
+			MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, &aperture_high_lsb);
+
+	// On DCN1, aperture is broken down into MSB and LSB; only keep bits [47:18] to match later DCN format
+	rq_regs->aperture_low_addr = (aperture_low_msb << 26) | (aperture_low_lsb >> 6);
+	rq_regs->aperture_high_addr = (aperture_high_msb << 26) | (aperture_high_lsb >> 6);
+
 	/* DLG - Per hubp */
 	REG_GET_2(BLANK_OFFSET_0,
 			REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,

@@ -1037,6 +1055,17 @@ void hubp1_read_state_common(struct hubp *hubp)
 			QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
 			QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
 
+	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS,
+			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_lo);
+
+	REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
+			PRIMARY_SURFACE_ADDRESS, &s->primary_surface_addr_hi);
+
+	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS,
+			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_lo);
+
+	REG_GET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH,
+			PRIMARY_META_SURFACE_ADDRESS, &s->primary_meta_addr_hi);
 }
 
 void hubp1_read_state(struct hubp *hubp)
@@ -682,6 +682,10 @@ struct dcn_hubp_state {
 	uint32_t min_ttu_vblank;
 	uint32_t qos_level_low_wm;
 	uint32_t qos_level_high_wm;
+	uint32_t primary_surface_addr_lo;
+	uint32_t primary_surface_addr_hi;
+	uint32_t primary_meta_addr_lo;
+	uint32_t primary_meta_addr_hi;
 };
 
 struct dcn10_hubp {
@@ -1502,25 +1502,22 @@ void dcn10_init_hw(struct dc *dc)
 void dcn10_power_down_on_boot(struct dc *dc)
 {
 	struct dc_link *edp_links[MAX_NUM_EDP];
-	struct dc_link *edp_link;
+	struct dc_link *edp_link = NULL;
 	int edp_num;
 	int i = 0;
 
 	get_edp_links(dc, edp_links, &edp_num);
+	if (edp_num)
+		edp_link = edp_links[0];
 
-	if (edp_num) {
-		for (i = 0; i < edp_num; i++) {
-			edp_link = edp_links[i];
-			if (edp_link->link_enc->funcs->is_dig_enabled &&
-					edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
-					dc->hwseq->funcs.edp_backlight_control &&
-					dc->hwss.power_down &&
-					dc->hwss.edp_power_control) {
-				dc->hwseq->funcs.edp_backlight_control(edp_link, false);
-				dc->hwss.power_down(dc);
-				dc->hwss.edp_power_control(edp_link, false);
-			}
-		}
+	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
+			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+			dc->hwseq->funcs.edp_backlight_control &&
+			dc->hwss.power_down &&
+			dc->hwss.edp_power_control) {
+		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
+		dc->hwss.power_down(dc);
+		dc->hwss.edp_power_control(edp_link, false);
 	} else {
 		for (i = 0; i < dc->link_count; i++) {
 			struct dc_link *link = dc->links[i];

@@ -3180,8 +3177,12 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 {
 	struct pipe_ctx *test_pipe;
-	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
+	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
+	const struct rect *r1 = &scl_data->recout, *r2;
 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+	int cur_layer = pipe_ctx->plane_state->layer_index;
+	bool upper_pipe_exists = false;
+	struct fixed31_32 one = dc_fixpt_from_int(1);
 
 	/**
	 * Disable the cursor if there's another pipe above this with a

@@ -3199,8 +3200,17 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
 
 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
 			return true;
+
+		if (test_pipe->plane_state->layer_index < cur_layer)
+			upper_pipe_exists = true;
 	}
 
+	// if plane scaled, assume an upper plane can handle cursor if it exists.
+	if (upper_pipe_exists &&
+			(scl_data->ratios.horz.value != one.value ||
+			scl_data->ratios.vert.value != one.value))
+		return true;
+
 	return false;
 }
@@ -1388,6 +1388,12 @@ void optc1_read_otg_state(struct optc *optc1,
 
 	REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
 			OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+
+	REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+			OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
+
+	REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION,
+			OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line);
 }
 
 bool optc1_get_otg_active_size(struct timing_generator *optc,
@@ -578,6 +578,8 @@ struct dcn_otg_state {
 	uint32_t underflow_occurred_status;
 	uint32_t otg_enabled;
 	uint32_t blank_enabled;
+	uint32_t vertical_interrupt2_en;
+	uint32_t vertical_interrupt2_line;
 };
 
 void optc1_read_otg_state(struct optc *optc1,