drm-next-fixes for 6.3-rc1

fbdev:
- fix uninit var in error path

shmem:
- revert unGPLing an export

i915:
- Don't use stolen memory or BAR mappings for ring buffers with LLC
- Add inverted backlight quirk for HP 14-r206nv
- Fix GSI offset for MCR lookups
- GVT fixes (memleak, debugfs attributes, kconfig, typos)

amdgpu:
- SMU 13 fixes
- Enable TMZ for GC 10.3.6
- Misc display fixes
- Buddy allocator fixes
- GC 11 fixes
- S0ix fix
- INFO IOCTL queries for GC 11
- VCN harvest fixes for SR-IOV
- UMC 8.10 RAS fixes
- Don't restrict bpc to 8
- NBIO 7.5 fix
- Allow freesync on PCon for more devices

amdkfd:
- SDMA fix
- Illegal memory access fix
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmQBK9MACgkQDHTzWXnE
hr64mhAAmIxLes1RHMDPPYNBcYQY80YVnc5qoAJvwoU50QMkw/UvUDaQyhWk7gGp
QjrpeFe3mn+T+hkOqYi27UHvRdeWlE2VKOFpKZIkjEg8lXSaPhDrY6og4EH92GSN
QUM1QadI4j4OpGbnBfT1RFGqedPsxmEBwWGN24ze20FD40bcI1FGE3ljhxfzvNNs
kENR3yuP64vxvXOZn0NIIn4+lVtl2gsZf+NW6w/de00YUQ3h8CpyNSf8yOYgV802
1OxIyPpH9+RGjX1oiOQMCBdln0jJFEzxoDBXMhbYdLsCpo/3lWKVp64mzpEnT+8W
mF87h1IUckgRSIoZv+aJbOoN9hpdBBldRY96FTJPJ0bLe10oha29gjgIlhmW2KEF
/2WTwWUNFK2fWGrDiuuGlqP7iGSK1jchE6kjE+LYb49/LlMcPU3Ztye26IeUOotN
3GAYdUJh9lQYusDsIY1x7Vy1C1+LZkADFsE4Vi7eBZoUPIRrgUcN1fWs5cQBmP3D
DGV/loVuArbuYPjlxtBg4ngCelUXmanmcTdop3192ifzhVZugMCFbbcl+qrlMPsC
+NwYP/tBrkv7+T//VrgcE+iiYQqkY6bSSKFPplMMe433jgKIKok62g5K+TY38Ds3
zgO6v38+chx+yTtasUPM5du5ouf6X4DFiJKJEmKSRGOjaZYiY5o=
=Ib+n
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2023-03-03-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "fbdev:
   - fix uninit var in error path

  shmem:
   - revert unGPLing an export

  i915:
   - Don't use stolen memory or BAR mappings for ring buffers with LLC
   - Add inverted backlight quirk for HP 14-r206nv
   - Fix GSI offset for MCR lookups
   - GVT fixes (memleak, debugfs attributes, kconfig, typos)

  amdgpu:
   - SMU 13 fixes
   - Enable TMZ for GC 10.3.6
   - Misc display fixes
   - Buddy allocator fixes
   - GC 11 fixes
   - S0ix fix
   - INFO IOCTL queries for GC 11
   - VCN harvest fixes for SR-IOV
   - UMC 8.10 RAS fixes
   - Don't restrict bpc to 8
   - NBIO 7.5 fix
   - Allow freesync on PCon for more devices

  amdkfd:
   - SDMA fix
   - Illegal memory access fix"

* tag 'drm-next-2023-03-03-1' of git://anongit.freedesktop.org/drm/drm: (45 commits)
  drm/amdgpu/vcn: fix compilation issue with legacy gcc
  drm/amd/display: Extend Freesync over PCon support for more devices
  Revert "drm/amd/display: Do not set DRR on pipe commit"
  drm/amd/display: fix shift-out-of-bounds in CalculateVMAndRowBytes
  drm/amd/display: Ext displays with dock can't recognized after resume
  drm/amdgpu: fix ttm_bo calltrace warning in psp_hw_fini
  drm/amdgpu: remove unused variable ring
  drm/amd/display: fix dm irq error message in gpu recover
  drm/amd: Fix initialization for nbio 7.5.1
  drm/amd/display: Don't restrict bpc to 8 bpc
  drm/amdgpu: Make umc_v8_10_convert_error_address static and remove unused variable
  drm/radeon: Fix eDP for single-display iMac11,2
  drm/shmem-helper: Revert accidental non-GPL export
  drm: omapdrm: Do not use helper unininitialized in omap_fbdev_init()
  drm/amd/pm: downgrade log level upon SMU IF version mismatch
  drm/amdgpu: Add ecc info query interface for umc v8_10
  drm/amdgpu: Add convert_error_address function for umc v8_10
  drm/amdgpu: add bad_page_threshold check in ras_eeprom_check_err
  drm/amdgpu: change default behavior of bad_page_threshold parameter
  drm/amdgpu: exclude duplicate pages from UMC RAS UE count
  ...
commit 2eb29d59dd
@@ -6,6 +6,7 @@ config DRM_AMDGPU
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
@@ -1073,6 +1073,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;

if (adev->asic_type < CHIP_RAVEN)
return false;

/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering
@@ -107,9 +107,12 @@
* - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
*   Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
* 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
* 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
*   tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
*   gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_MINOR 52
#define KMS_DRIVER_PATCHLEVEL 0

unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -921,7 +924,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);

MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -2414,8 +2417,10 @@ static int amdgpu_pmops_suspend(struct device *dev)

if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3)
return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2436,6 +2441,9 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;

if (!adev->in_s0ix && !adev->in_s3)
return 0;

/* Avoids registers access if device is physically gone */
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;
@@ -178,6 +178,8 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
/* Whether texture coordinate truncation is conformant. */
bool ta_cntl2_truncate_coord_mode;
uint64_t tcc_disabled_mask;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;
@@ -552,6 +552,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
/* VANGOGH */
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
@@ -808,6 +808,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;

vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -865,6 +867,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;

dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
adev->gfx.config.gc_gl1c_per_sa;
dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
dev_info->mall_size = adev->gmc.mall_size;

ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -139,7 +139,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)

if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
places[c].flags |= TTM_PL_FLAG_TOPDOWN;

if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
@@ -1683,7 +1683,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

if (!psp->hdcp_context.context.initialized) {
if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
@@ -1750,7 +1750,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

if (!psp->dtm_context.context.initialized) {
if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
@@ -1818,7 +1818,7 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

if (!psp->rap_context.context.initialized) {
if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;
@@ -176,7 +176,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
err_data.err_addr_cnt);
amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_save_bad_pages(adev, NULL);
}

dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
@ -2084,22 +2084,32 @@ out:
|
|||
/*
|
||||
* write error record array to eeprom, the function should be
|
||||
* protected by recovery_lock
|
||||
* new_cnt: new added UE count, excluding reserved bad pages, can be NULL
|
||||
*/
|
||||
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
|
||||
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
|
||||
unsigned long *new_cnt)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
struct ras_err_handler_data *data;
|
||||
struct amdgpu_ras_eeprom_control *control;
|
||||
int save_count;
|
||||
|
||||
if (!con || !con->eh_data)
|
||||
if (!con || !con->eh_data) {
|
||||
if (new_cnt)
|
||||
*new_cnt = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&con->recovery_lock);
|
||||
control = &con->eeprom_control;
|
||||
data = con->eh_data;
|
||||
save_count = data->count - control->ras_num_recs;
|
||||
mutex_unlock(&con->recovery_lock);
|
||||
|
||||
if (new_cnt)
|
||||
*new_cnt = save_count / adev->umc.retire_unit;
|
||||
|
||||
/* only new entries are saved */
|
||||
if (save_count > 0) {
|
||||
if (amdgpu_ras_eeprom_append(control,
|
||||
|
@ -2186,11 +2196,12 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
|
|||
/*
|
||||
* Justification of value bad_page_cnt_threshold in ras structure
|
||||
*
|
||||
* Generally, -1 <= amdgpu_bad_page_threshold <= max record length
|
||||
* in eeprom, and introduce two scenarios accordingly.
|
||||
* Generally, 0 <= amdgpu_bad_page_threshold <= max record length
|
||||
* in eeprom or amdgpu_bad_page_threshold == -2, introduce two
|
||||
* scenarios accordingly.
|
||||
*
|
||||
* Bad page retirement enablement:
|
||||
* - If amdgpu_bad_page_threshold = -1,
|
||||
* - If amdgpu_bad_page_threshold = -2,
|
||||
* bad_page_cnt_threshold = typical value by formula.
|
||||
*
|
||||
* - When the value from user is 0 < amdgpu_bad_page_threshold <
|
||||
|
|
|
@ -547,7 +547,8 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
|
|||
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
|
||||
struct eeprom_table_record *bps, int pages);
|
||||
|
||||
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
|
||||
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
|
||||
unsigned long *new_cnt);
|
||||
|
||||
static inline enum ta_ras_block
|
||||
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
|
||||
|
|
|
@ -417,7 +417,8 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
|
|||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
|
||||
if (!__is_ras_eeprom_supported(adev))
|
||||
if (!__is_ras_eeprom_supported(adev) ||
|
||||
!amdgpu_bad_page_threshold)
|
||||
return false;
|
||||
|
||||
/* skip check eeprom table for VEGA20 Gaming */
|
||||
|
@ -428,10 +429,18 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
|
|||
return false;
|
||||
|
||||
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
|
||||
dev_warn(adev->dev, "This GPU is in BAD status.");
|
||||
dev_warn(adev->dev, "Please retire it or set a larger "
|
||||
"threshold value when reloading driver.\n");
|
||||
return true;
|
||||
if (amdgpu_bad_page_threshold == -1) {
|
||||
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
|
||||
con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
|
||||
dev_warn(adev->dev,
|
||||
"But GPU can be operated due to bad_page_threshold = -1.\n");
|
||||
return false;
|
||||
} else {
|
||||
dev_warn(adev->dev, "This GPU is in BAD status.");
|
||||
dev_warn(adev->dev, "Please retire it or set a larger "
|
||||
"threshold value when reloading driver.\n");
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
|
@ -1191,8 +1200,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
|
|||
} else {
|
||||
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
|
||||
control->ras_num_recs, ras->bad_page_cnt_threshold);
|
||||
if (amdgpu_bad_page_threshold == -2) {
|
||||
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
|
||||
if (amdgpu_bad_page_threshold == -1) {
|
||||
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
|
||||
res = 0;
|
||||
} else {
|
||||
*exceed_err_limit = true;
|
||||
|
|
|
@ -68,7 +68,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
|
|||
if (amdgpu_bad_page_threshold != 0) {
|
||||
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
|
||||
err_data.err_addr_cnt);
|
||||
amdgpu_ras_save_bad_pages(adev);
|
||||
amdgpu_ras_save_bad_pages(adev, NULL);
|
||||
}
|
||||
|
||||
out:
|
||||
|
@ -147,7 +147,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
|
|||
err_data->err_addr_cnt) {
|
||||
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
|
||||
err_data->err_addr_cnt);
|
||||
amdgpu_ras_save_bad_pages(adev);
|
||||
amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
|
||||
|
||||
amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
|
||||
|
||||
|
|
|
@ -74,6 +74,8 @@ struct amdgpu_umc {
|
|||
|
||||
/* UMC regiser per channel offset */
|
||||
uint32_t channel_offs;
|
||||
/* how many pages are retired in one UE */
|
||||
uint32_t retire_unit;
|
||||
/* channel index table of interleaved memory */
|
||||
const uint32_t *channel_idx_tbl;
|
||||
struct ras_common_if *ras_if;
|
||||
|
|
|
@ -453,7 +453,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
|||
/* Limit maximum size to 2GiB due to SG table limitations */
|
||||
size = min(remaining_size, 2ULL << 30);
|
||||
|
||||
if (size >= (u64)pages_per_block << PAGE_SHIFT)
|
||||
if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
|
||||
!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
|
||||
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
|
||||
|
||||
cur_size = size;
|
||||
|
|
|
@ -1503,44 +1503,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
|
|||
WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
|
||||
}
|
||||
|
||||
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
|
||||
|
||||
gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
|
||||
gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
|
||||
CC_GC_SA_UNIT_DISABLE,
|
||||
SA_DISABLE);
|
||||
gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
|
||||
gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
|
||||
GC_USER_SA_UNIT_DISABLE,
|
||||
SA_DISABLE);
|
||||
sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
|
||||
adev->gfx.config.max_shader_engines);
|
||||
|
||||
return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
|
||||
}
|
||||
|
||||
static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 data, mask;
|
||||
u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
|
||||
u32 rb_mask;
|
||||
|
||||
data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
|
||||
data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
|
||||
gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
|
||||
gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
|
||||
CC_RB_BACKEND_DISABLE,
|
||||
BACKEND_DISABLE);
|
||||
gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
|
||||
gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
|
||||
GC_USER_RB_BACKEND_DISABLE,
|
||||
BACKEND_DISABLE);
|
||||
rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
|
||||
adev->gfx.config.max_shader_engines);
|
||||
|
||||
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
|
||||
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
|
||||
|
||||
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
|
||||
adev->gfx.config.max_sh_per_se);
|
||||
|
||||
return (~data) & mask;
|
||||
return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
|
||||
}
|
||||
|
||||
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, j;
|
||||
u32 data;
|
||||
u32 active_rbs = 0;
|
||||
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
|
||||
adev->gfx.config.max_sh_per_se;
|
||||
u32 rb_bitmap_width_per_sa;
|
||||
u32 max_sa;
|
||||
u32 active_sa_bitmap;
|
||||
u32 global_active_rb_bitmap;
|
||||
u32 active_rb_bitmap = 0;
|
||||
u32 i;
|
||||
|
||||
mutex_lock(&adev->grbm_idx_mutex);
|
||||
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
|
||||
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
|
||||
gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
|
||||
data = gfx_v11_0_get_rb_active_bitmap(adev);
|
||||
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
|
||||
rb_bitmap_width_per_sh);
|
||||
}
|
||||
/* query sa bitmap from SA_UNIT_DISABLE registers */
|
||||
active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
|
||||
/* query rb bitmap from RB_BACKEND_DISABLE registers */
|
||||
global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
|
||||
|
||||
/* generate active rb bitmap according to active sa bitmap */
|
||||
max_sa = adev->gfx.config.max_shader_engines *
|
||||
adev->gfx.config.max_sh_per_se;
|
||||
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
|
||||
adev->gfx.config.max_sh_per_se;
|
||||
for (i = 0; i < max_sa; i++) {
|
||||
if (active_sa_bitmap & (1 << i))
|
||||
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
|
||||
}
|
||||
gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
adev->gfx.config.backend_enable_mask = active_rbs;
|
||||
adev->gfx.config.num_rbs = hweight32(active_rbs);
|
||||
active_rb_bitmap |= global_active_rb_bitmap;
|
||||
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
|
||||
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
|
||||
}
|
||||
|
||||
#define DEFAULT_SH_MEM_BASES (0x6000)
|
||||
|
@ -1633,6 +1659,11 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
|
|||
gfx_v11_0_get_tcc_info(adev);
|
||||
adev->gfx.config.pa_sc_tile_steering_override = 0;
|
||||
|
||||
/* Set whether texture coordinate truncation is conformant. */
|
||||
tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
|
||||
adev->gfx.config.ta_cntl2_truncate_coord_mode =
|
||||
REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);
|
||||
|
||||
/* XXX SH_MEM regs */
|
||||
/* where to put LDS, scratch, GPUVM in FSA64 space */
|
||||
mutex_lock(&adev->srbm_mutex);
|
||||
|
|
|
@ -692,6 +692,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
|
|||
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
|
||||
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
|
||||
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
|
||||
adev->umc.retire_unit = 1;
|
||||
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
|
||||
adev->umc.ras = &umc_v8_7_ras;
|
||||
break;
|
||||
|
|
|
@ -570,6 +570,7 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
|
|||
adev->umc.node_inst_num = adev->gmc.num_umc;
|
||||
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
|
||||
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
|
||||
adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
|
||||
if (adev->umc.node_inst_num == 4)
|
||||
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
|
||||
else
|
||||
|
|
|
@ -1288,6 +1288,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
|
|||
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
|
||||
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
|
||||
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
|
||||
adev->umc.retire_unit = 1;
|
||||
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
|
||||
adev->umc.ras = &umc_v6_1_ras;
|
||||
break;
|
||||
|
@ -1296,6 +1297,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
|
|||
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
|
||||
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
|
||||
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
|
||||
adev->umc.retire_unit = 1;
|
||||
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
|
||||
adev->umc.ras = &umc_v6_1_ras;
|
||||
break;
|
||||
|
@ -1305,6 +1307,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
|
|||
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
|
||||
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
|
||||
adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
|
||||
adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
|
||||
if (!adev->gmc.xgmi.connected_to_cpu)
|
||||
adev->umc.ras = &umc_v6_7_ras;
|
||||
if (1 & adev->smuio.funcs->get_die_id(adev))
|
||||
|
|
|
@ -382,6 +382,11 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
|
|||
if (def != data)
|
||||
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
|
||||
break;
|
||||
case IP_VERSION(7, 5, 1):
|
||||
data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
|
||||
data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
|
||||
WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
|
||||
fallthrough;
|
||||
default:
|
||||
def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
|
||||
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
|
||||
|
|
|
@ -209,6 +209,45 @@ static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
|
||||
struct ras_err_data *err_data, uint64_t err_addr,
|
||||
uint32_t ch_inst, uint32_t umc_inst,
|
||||
uint32_t node_inst, uint64_t mc_umc_status)
|
||||
{
|
||||
uint64_t na_err_addr_base;
|
||||
uint64_t na_err_addr, retired_page_addr;
|
||||
uint32_t channel_index, addr_lsb, col = 0;
|
||||
int ret = 0;
|
||||
|
||||
channel_index =
|
||||
adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
|
||||
adev->umc.channel_inst_num +
|
||||
umc_inst * adev->umc.channel_inst_num +
|
||||
ch_inst];
|
||||
|
||||
/* the lowest lsb bits should be ignored */
|
||||
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
|
||||
err_addr &= ~((0x1ULL << addr_lsb) - 1);
|
||||
na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
|
||||
|
||||
/* loop for all possibilities of [C6 C5] in normal address. */
|
||||
for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
|
||||
na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
|
||||
|
||||
/* Mapping normal error address to retired soc physical address. */
|
||||
ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
|
||||
na_err_addr, &retired_page_addr);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to map pa from umc na.\n");
|
||||
break;
|
||||
}
|
||||
dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
|
||||
retired_page_addr);
|
||||
amdgpu_umc_fill_error_record(err_data, na_err_addr,
|
||||
retired_page_addr, channel_index, umc_inst);
|
||||
}
|
||||
}
|
||||
|
||||
static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
|
||||
struct ras_err_data *err_data,
|
||||
uint32_t umc_reg_offset,
|
||||
|
@ -218,10 +257,7 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
|
|||
{
|
||||
uint64_t mc_umc_status_addr;
|
||||
uint64_t mc_umc_status, err_addr;
|
||||
uint64_t mc_umc_addrt0, na_err_addr_base;
|
||||
uint64_t na_err_addr, retired_page_addr;
|
||||
uint32_t channel_index, addr_lsb, col = 0;
|
||||
int ret = 0;
|
||||
uint64_t mc_umc_addrt0;
|
||||
|
||||
mc_umc_status_addr =
|
||||
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
|
||||
|
@ -236,12 +272,6 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
|
|||
return;
|
||||
}
|
||||
|
||||
channel_index =
|
||||
adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
|
||||
adev->umc.channel_inst_num +
|
||||
umc_inst * adev->umc.channel_inst_num +
|
||||
ch_inst];
|
||||
|
||||
/* calculate error address if ue error is detected */
|
||||
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
|
||||
|
@ -251,27 +281,8 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
|
|||
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
|
||||
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
|
||||
|
||||
/* the lowest lsb bits should be ignored */
|
||||
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
|
||||
err_addr &= ~((0x1ULL << addr_lsb) - 1);
|
||||
na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
|
||||
|
||||
/* loop for all possibilities of [C6 C5] in normal address. */
|
||||
for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
|
||||
na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
|
||||
|
||||
/* Mapping normal error address to retired soc physical address. */
|
||||
ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
|
||||
na_err_addr, &retired_page_addr);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to map pa from umc na.\n");
|
||||
break;
|
||||
}
|
||||
dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
|
||||
retired_page_addr);
|
||||
amdgpu_umc_fill_error_record(err_data, na_err_addr,
|
||||
retired_page_addr, channel_index, umc_inst);
|
||||
}
|
||||
umc_v8_10_convert_error_address(adev, err_data, err_addr,
|
||||
ch_inst, umc_inst, node_inst, mc_umc_status);
|
||||
}
|
||||
|
||||
/* clear umc status */
|
||||
|
@ -349,6 +360,133 @@ static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
|
|||
return true;
|
||||
}
|
||||
|
||||
static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
|
||||
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
|
||||
unsigned long *error_count)
|
||||
{
|
||||
uint64_t mc_umc_status;
|
||||
uint32_t eccinfo_table_idx;
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
|
||||
adev->umc.channel_inst_num +
|
||||
umc_inst * adev->umc.channel_inst_num +
|
||||
ch_inst;
|
||||
|
||||
/* check the MCUMC_STATUS */
|
||||
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
|
||||
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
|
||||
*error_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
|
||||
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
|
||||
unsigned long *error_count)
|
||||
{
|
||||
uint64_t mc_umc_status;
|
||||
uint32_t eccinfo_table_idx;
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
|
||||
adev->umc.channel_inst_num +
|
||||
umc_inst * adev->umc.channel_inst_num +
|
||||
ch_inst;
|
||||
|
||||
/* check the MCUMC_STATUS */
|
||||
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
|
||||
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
|
||||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
|
||||
*error_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
|
||||
|
||||
uint32_t node_inst = 0;
|
||||
uint32_t umc_inst = 0;
|
||||
uint32_t ch_inst = 0;
|
||||
|
||||
/* TODO: driver needs to toggle DF Cstate to ensure
|
||||
* safe access of UMC registers. Will add the protection
|
||||
*/
|
||||
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
|
||||
umc_v8_10_ecc_info_query_correctable_error_count(adev,
|
||||
node_inst, umc_inst, ch_inst,
|
||||
&(err_data->ce_count));
|
||||
umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
|
||||
node_inst, umc_inst, ch_inst,
|
||||
&(err_data->ue_count));
|
||||
}
|
||||
}
|
||||
|
||||
static void umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
|
||||
struct ras_err_data *err_data,
|
||||
uint32_t ch_inst,
|
||||
uint32_t umc_inst,
|
||||
uint32_t node_inst)
|
||||
{
|
||||
uint32_t eccinfo_table_idx;
|
||||
uint64_t mc_umc_status, err_addr;
|
||||
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
|
||||
adev->umc.channel_inst_num +
|
||||
umc_inst * adev->umc.channel_inst_num +
|
||||
ch_inst;
|
||||
|
||||
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
|
||||
|
||||
if (mc_umc_status == 0)
|
||||
return;
|
||||
|
||||
if (!err_data->err_addr)
|
||||
return;
|
||||
|
||||
/* calculate error address if ue error is detected */
|
||||
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
|
||||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
|
||||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {
|
||||
|
||||
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
|
||||
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
|
||||
|
||||
umc_v8_10_convert_error_address(adev, err_data, err_addr,
|
||||
ch_inst, umc_inst, node_inst, mc_umc_status);
|
||||
}
|
||||
}
|
||||
|
||||
static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
|
||||
|
||||
uint32_t node_inst = 0;
|
||||
uint32_t umc_inst = 0;
|
||||
uint32_t ch_inst = 0;
|
||||
|
||||
/* TODO: driver needs to toggle DF Cstate to ensure
|
||||
* safe access of UMC resgisters. Will add the protection
|
||||
* when firmware interface is ready
|
||||
*/
|
||||
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
|
||||
umc_v8_10_ecc_info_query_error_address(adev,
|
||||
err_data,
|
||||
ch_inst,
|
||||
umc_inst,
|
||||
node_inst);
|
||||
}
|
||||
}
|
||||
|
||||
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
|
||||
.query_ras_error_count = umc_v8_10_query_ras_error_count,
|
||||
.query_ras_error_address = umc_v8_10_query_ras_error_address,
|
||||
|
@ -360,4 +498,6 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
|
|||
},
|
||||
.err_cnt_init = umc_v8_10_err_cnt_init,
|
||||
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
|
||||
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
|
||||
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
|
||||
};
|
||||
|
|
|
@ -78,9 +78,17 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
|
|||
static int vcn_v4_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
|
||||
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
|
||||
adev->vcn.harvest_config |= 1 << i;
|
||||
dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* re-use enc ring as unified ring */
|
||||
adev->vcn.num_enc_rings = 1;
|
||||
|
@ -238,16 +246,11 @@ static int vcn_v4_0_hw_init(void *handle)
|
|||
continue;
|
||||
|
||||
ring = &adev->vcn.inst[i].ring_enc[0];
|
||||
if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
|
||||
ring->sched.ready = false;
|
||||
ring->no_scheduler = true;
|
||||
dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
|
||||
} else {
|
||||
ring->wptr = 0;
|
||||
ring->wptr_old = 0;
|
||||
vcn_v4_0_unified_ring_set_wptr(ring);
|
||||
ring->sched.ready = true;
|
||||
}
|
||||
ring->wptr = 0;
|
||||
ring->wptr_old = 0;
|
||||
vcn_v4_0_unified_ring_set_wptr(ring);
|
||||
ring->sched.ready = true;
|
||||
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
|
||||
|
|
|
@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
|
|||
if (init_mqd_managers(dqm))
|
||||
goto out_free;
|
||||
|
||||
if (allocate_hiq_sdma_mqd(dqm)) {
|
||||
if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
|
||||
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
|
||||
goto out_free;
|
||||
}
|
||||
|
@ -2397,7 +2397,8 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
|
|||
void device_queue_manager_uninit(struct device_queue_manager *dqm)
|
||||
{
|
||||
dqm->ops.uninitialize(dqm);
|
||||
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
|
||||
if (!dqm->dev->shared_resources.enable_mes)
|
||||
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
|
||||
kfree(dqm);
|
||||
}
|
||||
|
||||
|
|
|
@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
|
|||
struct kfd_event_waiter *event_waiters;
|
||||
uint32_t i;
|
||||
|
||||
event_waiters = kmalloc_array(num_events,
|
||||
sizeof(struct kfd_event_waiter),
|
||||
GFP_KERNEL);
|
||||
event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
|
||||
GFP_KERNEL);
|
||||
if (!event_waiters)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
|
||||
for (i = 0; i < num_events; i++)
|
||||
init_wait(&event_waiters[i].wait);
|
||||
event_waiters[i].activated = false;
|
||||
}
|
||||
|
||||
return event_waiters;
|
||||
}
|
||||
|
|
|
@ -308,11 +308,16 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
|
|||
struct queue_properties *q)
|
||||
{
|
||||
struct v11_sdma_mqd *m;
|
||||
int size;
|
||||
|
||||
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
|
||||
|
||||
memset(m, 0, sizeof(struct v11_sdma_mqd));
|
||||
if (mm->dev->shared_resources.enable_mes)
|
||||
size = PAGE_SIZE;
|
||||
else
|
||||
size = sizeof(struct v11_sdma_mqd);
|
||||
|
||||
memset(m, 0, size);
|
||||
*mqd = m;
|
||||
if (gart_addr)
|
||||
*gart_addr = mqd_mem_obj->gpu_addr;
|
||||
|
@ -443,6 +448,14 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
|
|||
#if defined(CONFIG_DEBUG_FS)
|
||||
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
|
||||
#endif
|
||||
/*
|
||||
* To allocate SDMA MQDs by generic functions
|
||||
* when MES is enabled.
|
||||
*/
|
||||
if (dev->shared_resources.enable_mes) {
|
||||
mqd->allocate_mqd = allocate_mqd;
|
||||
mqd->free_mqd = kfd_free_mqd_cp;
|
||||
}
|
||||
pr_debug("%s@%i\n", __func__, __LINE__);
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -28,7 +28,6 @@ config DRM_AMD_DC_DCN
|
|||
config DRM_AMD_DC_HDCP
|
||||
bool "Enable HDCP support in DC"
|
||||
depends on DRM_AMD_DC
|
||||
select DRM_DISPLAY_HDCP_HELPER
|
||||
help
|
||||
Choose this option if you want to support HDCP authentication.
|
||||
|
||||
|
|
|
@ -41,6 +41,8 @@
|
|||
#include "dpcd_defs.h"
|
||||
#include "link/protocols/link_dpcd.h"
|
||||
#include "link_service_types.h"
|
||||
#include "link/protocols/link_dp_capability.h"
|
||||
#include "link/protocols/link_ddc.h"
|
||||
|
||||
#include "vid.h"
|
||||
#include "amdgpu.h"
|
||||
|
@ -2302,6 +2304,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
|
|||
if (suspend) {
|
||||
drm_dp_mst_topology_mgr_suspend(mgr);
|
||||
} else {
|
||||
/* if extended timeout is supported in hardware,
|
||||
* default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
|
||||
* CTS 4.2.1.1 regression introduced by CTS specs requirement update.
|
||||
*/
|
||||
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
|
||||
if (!dp_is_lttpr_present(aconnector->dc_link))
|
||||
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
|
||||
|
||||
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
|
||||
if (ret < 0) {
|
||||
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
|
||||
|
@ -4265,6 +4275,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
|||
/* Update the actual used number of crtc */
|
||||
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
|
||||
|
||||
amdgpu_dm_set_irq_funcs(adev);
|
||||
|
||||
link_cnt = dm->dc->caps.max_links;
|
||||
if (amdgpu_dm_mode_config_init(dm->adev)) {
|
||||
DRM_ERROR("DM: Failed to initialize mode config\n");
|
||||
|
@ -4757,8 +4769,6 @@ static int dm_early_init(void *handle)
|
|||
break;
|
||||
}
|
||||
|
||||
amdgpu_dm_set_irq_funcs(adev);
|
||||
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dm_display_funcs;
|
||||
|
||||
|
@ -7235,7 +7245,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
|
|||
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
|
||||
|
||||
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
|
||||
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
|
||||
aconnector->base.state->max_bpc = 16;
|
||||
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
|
||||
|
||||
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
|
||||
|
|
|
@ -1149,6 +1149,8 @@ static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
|
|||
|
||||
switch (branch_dev_id) {
|
||||
case DP_BRANCH_DEVICE_ID_0060AD:
|
||||
case DP_BRANCH_DEVICE_ID_00E04C:
|
||||
case DP_BRANCH_DEVICE_ID_90CC24:
|
||||
ret_val = true;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -779,10 +779,8 @@ void dce110_edp_wait_for_hpd_ready(
|
|||
|
||||
dal_gpio_destroy_irq(&hpd);
|
||||
|
||||
if (false == edp_hpd_high) {
|
||||
DC_LOG_WARNING(
|
||||
"%s: wait timed out!\n", __func__);
|
||||
}
|
||||
/* ensure that the panel is detected */
|
||||
ASSERT(edp_hpd_high);
|
||||
}
|
||||
|
||||
void dce110_edp_power_control(
|
||||
|
|
|
@ -998,5 +998,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
|
|||
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
|
||||
|
||||
dcn20_prepare_bandwidth(dc, context);
|
||||
|
||||
dc_dmub_srv_p_state_delegate(dc,
|
||||
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
|
||||
}
|
||||
|
||||
|
|
|
@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
|
|||
}
|
||||
|
||||
if (SurfaceTiling == dm_sw_linear) {
|
||||
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
|
||||
if (PTEBufferSizeInRequests == 0)
|
||||
*dpte_row_height = 1;
|
||||
else
|
||||
*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
|
||||
*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
|
||||
*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
|
||||
} else if (ScanDirection != dm_vert) {
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
|
||||
#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
|
||||
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
|
||||
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
|
||||
|
||||
#define EDID_SEGMENT_SIZE 256
|
||||
|
||||
|
|
|
@ -60,8 +60,6 @@
|
|||
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
|
||||
#endif
|
||||
|
||||
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
|
||||
|
||||
struct dp_lt_fallback_entry {
|
||||
enum dc_lane_count lane_count;
|
||||
enum dc_link_rate link_rate;
|
||||
|
|
|
@ -1202,10 +1202,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = smu_setup_pptable(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to setup pptable!\n");
|
||||
return ret;
|
||||
/*
|
||||
* It is assumed the pptable used before runpm is same as
|
||||
* the one used afterwards. Thus, we can reuse the stored
|
||||
* copy and do not need to resetup the pptable again.
|
||||
*/
|
||||
if (!adev->in_runpm) {
|
||||
ret = smu_setup_pptable(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to setup pptable!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* smu_dump_pptable(smu); */
|
||||
|
|
|
@ -256,7 +256,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
|
|||
* to be backward compatible.
|
||||
* 2. New fw usually brings some optimizations. But that's visible
|
||||
* only on the paired driver.
|
||||
* Considering above, we just leave user a warning message instead
|
||||
* Considering above, we just leave user a verbal message instead
|
||||
* of halt driver loading.
|
||||
*/
|
||||
if (if_version != smu->smc_driver_if_version) {
|
||||
|
@ -264,7 +264,7 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
|
|||
"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
|
||||
smu->smc_driver_if_version, if_version,
|
||||
smu_program, smu_version, smu_major, smu_minor, smu_debug);
|
||||
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
|
||||
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -93,7 +93,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
|
|||
* to be backward compatible.
|
||||
* 2. New fw usually brings some optimizations. But that's visible
|
||||
* only on the paired driver.
|
||||
* Considering above, we just leave user a warning message instead
|
||||
* Considering above, we just leave user a verbal message instead
|
||||
* of halt driver loading.
|
||||
*/
|
||||
if (if_version != smu->smc_driver_if_version) {
|
||||
|
@ -101,7 +101,7 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
|
|||
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
|
||||
smu->smc_driver_if_version, if_version,
|
||||
smu_program, smu_version, smu_major, smu_minor, smu_debug);
|
||||
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
|
||||
dev_info(smu->adev->dev, "SMU driver if version not matched\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -311,7 +311,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
|
|||
* to be backward compatible.
|
||||
* 2. New fw usually brings some optimizations. But that's visible
|
||||
* only on the paired driver.
|
||||
* Considering above, we just leave user a warning message instead
|
||||
* Considering above, we just leave user a verbal message instead
|
||||
* of halt driver loading.
|
||||
*/
|
||||
if (if_version != smu->smc_driver_if_version) {
|
||||
|
@ -319,7 +319,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
|
|||
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
|
||||
smu->smc_driver_if_version, if_version,
|
||||
smu_program, smu_version, smu_major, smu_minor, smu_debug);
|
||||
dev_warn(adev->dev, "SMU driver if version not matched\n");
|
||||
dev_info(adev->dev, "SMU driver if version not matched\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -2229,10 +2229,23 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
|
|||
int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
|
||||
enum smu_baco_seq baco_seq)
|
||||
{
|
||||
return smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_ArmD3,
|
||||
baco_seq,
|
||||
NULL);
|
||||
struct smu_baco_context *smu_baco = &smu->smu_baco;
|
||||
int ret;
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_ArmD3,
|
||||
baco_seq,
|
||||
NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (baco_seq == BACO_SEQ_BAMACO ||
|
||||
baco_seq == BACO_SEQ_BACO)
|
||||
smu_baco->state = SMU_BACO_STATE_ENTER;
|
||||
else
|
||||
smu_baco->state = SMU_BACO_STATE_EXIT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool smu_v13_0_baco_is_support(struct smu_context *smu)
|
||||
|
|
|
@ -147,6 +147,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
|
|||
PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
|
||||
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
|
||||
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
|
||||
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
|
||||
};
|
||||
|
||||
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
|
||||
|
|
|
@ -744,7 +744,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
|
|||
|
||||
return sgt;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_shmem_get_pages_sgt);
|
||||
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
|
||||
|
||||
/**
|
||||
* drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
|
||||
|
|
|
@ -118,9 +118,6 @@ config DRM_I915_USERPTR
|
|||
|
||||
If in doubt, say "Y".
|
||||
|
||||
config DRM_I915_GVT
|
||||
bool
|
||||
|
||||
config DRM_I915_GVT_KVMGT
|
||||
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
|
||||
depends on DRM_I915
|
||||
|
@ -172,3 +169,6 @@ menu "drm/i915 Unstable Evolution"
|
|||
depends on DRM_I915
|
||||
source "drivers/gpu/drm/i915/Kconfig.unstable"
|
||||
endmenu
|
||||
|
||||
config DRM_I915_GVT
|
||||
bool
|
||||
|
|
|
@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
|
|||
/* ECS Liva Q2 */
|
||||
{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
|
||||
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
|
||||
/* HP Notebook - 14-r206nv */
|
||||
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
|
||||
};
|
||||
|
||||
void intel_init_quirks(struct drm_i915_private *i915)
|
||||
|
|
|
@ -559,12 +559,15 @@ static bool reg_needs_read_steering(struct intel_gt *gt,
|
|||
i915_mcr_reg_t reg,
|
||||
enum intel_steering_type type)
|
||||
{
|
||||
const u32 offset = i915_mmio_reg_offset(reg);
|
||||
u32 offset = i915_mmio_reg_offset(reg);
|
||||
const struct intel_mmio_range *entry;
|
||||
|
||||
if (likely(!gt->steering_table[type]))
|
||||
return false;
|
||||
|
||||
if (IS_GSI_REG(offset))
|
||||
offset += gt->uncore->gsi_offset;
|
||||
|
||||
for (entry = gt->steering_table[type]; entry->end; entry++) {
|
||||
if (offset >= entry->start && offset <= entry->end)
|
||||
return true;
|
||||
|
|
|
@ -53,7 +53,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
|
|||
if (unlikely(ret))
|
||||
goto err_unpin;
|
||||
|
||||
if (i915_vma_is_map_and_fenceable(vma)) {
|
||||
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
|
||||
addr = (void __force *)i915_vma_pin_iomap(vma);
|
||||
} else {
|
||||
int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
|
||||
|
@ -98,7 +98,7 @@ void intel_ring_unpin(struct intel_ring *ring)
|
|||
return;
|
||||
|
||||
i915_vma_unset_ggtt_write(vma);
|
||||
if (i915_vma_is_map_and_fenceable(vma))
|
||||
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
|
||||
i915_vma_unpin_iomap(vma);
|
||||
else
|
||||
i915_gem_object_unpin_map(vma->obj);
|
||||
|
@ -116,7 +116,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
|
|||
|
||||
obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
|
||||
I915_BO_ALLOC_PM_VOLATILE);
|
||||
if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
|
||||
if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
|
||||
obj = i915_gem_object_create_stolen(i915, size);
|
||||
if (IS_ERR(obj))
|
||||
obj = i915_gem_object_create_internal(i915, size);
|
||||
|
|
|
@ -147,9 +147,9 @@ vgpu_scan_nonprivbb_set(void *data, u64 val)
|
|||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
|
||||
vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
|
||||
"0x%llx\n");
|
||||
DEFINE_DEBUGFS_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
|
||||
vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
|
||||
"0x%llx\n");
|
||||
|
||||
static int vgpu_status_get(void *data, u64 *val)
|
||||
{
|
||||
|
@ -165,7 +165,7 @@ static int vgpu_status_get(void *data, u64 *val)
|
|||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
|
||||
DEFINE_DEBUGFS_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
|
||||
|
||||
/**
|
||||
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
|
||||
|
@ -180,10 +180,10 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
|
|||
|
||||
debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
|
||||
&vgpu_mmio_diff_fops);
|
||||
debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
|
||||
&vgpu_scan_nonprivbb_fops);
|
||||
debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
|
||||
&vgpu_status_fops);
|
||||
debugfs_create_file_unsafe("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
|
||||
&vgpu_scan_nonprivbb_fops);
|
||||
debugfs_create_file_unsafe("status", 0644, vgpu->debugfs, vgpu,
|
||||
&vgpu_status_fops);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -171,7 +171,7 @@ static int verify_firmware(struct intel_gvt *gvt,
|
|||
mem = (fw->data + h->cfg_space_offset);
|
||||
|
||||
id = *(u16 *)(mem + PCI_VENDOR_ID);
|
||||
VERIFY("vender id", id, pdev->vendor);
|
||||
VERIFY("vendor id", id, pdev->vendor);
|
||||
|
||||
id = *(u16 *)(mem + PCI_DEVICE_ID);
|
||||
VERIFY("device id", id, pdev->device);
|
||||
|
|
|
@ -699,7 +699,7 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
|
|||
|
||||
clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
|
||||
|
||||
debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
|
||||
debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
|
||||
|
||||
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
|
||||
&vgpu->track_node);
|
||||
|
|
|
@ -323,7 +323,7 @@ int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
|
|||
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
|
||||
GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto out_unlock;;
|
||||
goto out_unlock;
|
||||
|
||||
vgpu->id = ret;
|
||||
vgpu->sched_ctl.weight = conf->weight;
|
||||
|
|
|
@ -136,7 +136,7 @@ static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
|
|||
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct msm_fbdev *fbdev = NULL;
|
||||
struct msm_fbdev *fbdev;
|
||||
struct drm_fb_helper *helper;
|
||||
int ret;
|
||||
|
||||
|
|
|
@ -233,7 +233,7 @@ void omap_fbdev_init(struct drm_device *dev)
|
|||
|
||||
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
|
||||
if (!fbdev)
|
||||
goto fail;
|
||||
return;
|
||||
|
||||
INIT_WORK(&fbdev->work, pan_worker);
|
||||
|
||||
|
|
|
@ -2123,11 +2123,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
|
|||
|
||||
/*
|
||||
* On DCE32 any encoder can drive any block so usually just use crtc id,
|
||||
* but Apple thinks different at least on iMac10,1, so there use linkb,
|
||||
* but Apple thinks different at least on iMac10,1 and iMac11,2, so there use linkb,
|
||||
* otherwise the internal eDP panel will stay dark.
|
||||
*/
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
|
||||
if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
|
||||
dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
|
||||
enc_idx = (dig->linkb) ? 1 : 0;
|
||||
else
|
||||
enc_idx = radeon_crtc->crtc_id;
|
||||
|
|
|
@ -715,6 +715,7 @@ struct drm_amdgpu_cs_chunk_data {
|
|||
#define AMDGPU_IDS_FLAGS_FUSION 0x1
|
||||
#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
|
||||
#define AMDGPU_IDS_FLAGS_TMZ 0x4
|
||||
#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
|
||||
|
||||
/* indicate if acceleration can be working */
|
||||
#define AMDGPU_INFO_ACCEL_WORKING 0x00
|
||||
|
@ -1115,6 +1116,16 @@ struct drm_amdgpu_info_device {
|
|||
__u64 tcc_disabled_mask;
|
||||
__u64 min_engine_clock;
|
||||
__u64 min_memory_clock;
|
||||
/* The following fields are only set on gfx11+, older chips set 0. */
|
||||
__u32 tcp_cache_size; /* AKA GL0, VMEM cache */
|
||||
__u32 num_sqc_per_wgp;
|
||||
__u32 sqc_data_cache_size; /* AKA SMEM cache */
|
||||
__u32 sqc_inst_cache_size;
|
||||
__u32 gl1c_cache_size;
|
||||
__u32 gl2c_cache_size;
|
||||
__u64 mall_size; /* AKA infinity cache */
|
||||
/* high 32 bits of the rb pipes mask */
|
||||
__u32 enabled_rb_pipes_mask_hi;
|
||||
};
|
||||
|
||||
struct drm_amdgpu_info_hw_ip {
|
||||
|
|