Merge drm/drm-next into drm-misc-next
Backmerging to sync with other DRM trees.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
commit 8d71c78e1a

@@ -1,8 +1,10 @@
Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version
Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3
Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0
Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2
Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1
SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1
Ryzen 5000 series, GREEN SARDINE, DCN 2.1, 9.3, VCN 2.2, 4.1.1
Ryzen 6000 Zen, YELLOW CARP, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3
Product Name, Code Reference, DCN/DCE version, GC version, VCE/UVD/VCN version, SDMA version, MP0 version
Radeon R* Graphics, CARRIZO/STONEY, DCE 11, 8, VCE 3 / UVD 6, 3, n/a
Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN/PICASSO, DCN 1.0, 9.1.0, VCN 1.0, 4.1.0, 10.0.0
Ryzen 4000 series, RENOIR, DCN 2.1, 9.3, VCN 2.2, 4.1.2, 11.0.3
Ryzen 3000 series / AMD Ryzen Embedded V1*/R1* with Radeon Vega Gfx, RAVEN2, DCN 1.0, 9.2.2, VCN 1.0.1, 4.1.1, 10.0.1
SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
Ryzen 6000 series / Ryzen 7x35 series, YELLOW CARP / Rembrandt / Rembrandt+, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
@@ -22,3 +22,5 @@ AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN
AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0.0, 5.2.2
AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4
AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5
AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0
AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2
@@ -37,7 +37,7 @@ Accelerated Processing Units (APU) Info

.. csv-table::
:header-rows: 1
:widths: 3, 2, 2, 1, 1, 1
:widths: 3, 2, 2, 1, 1, 1, 1
:file: ./apu-asic-info-table.csv

Discrete GPU Info
@@ -137,6 +137,7 @@ amdgpu-y += \
gfx_v10_0.o \
imu_v11_0.o \
gfx_v11_0.o \
gfx_v11_0_3.o \
imu_v11_0_3.o

# add async DMA block
@@ -2076,6 +2076,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent;
int i, r;
bool total;

amdgpu_device_enable_virtual_display(adev);
@@ -2159,6 +2160,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;

total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -2172,7 +2174,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
} else if (r) {
DRM_ERROR("early_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
@@ -2203,6 +2205,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)

}
}
if (!total)
return -ENODEV;

adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -5854,8 +5858,8 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
int amdgpu_in_reset(struct amdgpu_device *adev)
{
return atomic_read(&adev->reset_domain->in_gpu_reset);
}

}

/**
* amdgpu_device_halt() - bring hardware to some kind of halt state
*
@@ -106,9 +106,10 @@
* - 3.49.0 - Add gang submit into CS IOCTL
* - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
* Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
* 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 50
#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_PATCHLEVEL 0

unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
return amdgpu_compute_multipipe == 1;
}

if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
return true;

/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
@@ -696,6 +699,50 @@ late_fini:
return r;
}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{
int err = 0;
struct amdgpu_gfx_ras *ras = NULL;

/* adev->gfx.ras is NULL, which means gfx does not
* support ras function, then do nothing here.
*/
if (!adev->gfx.ras)
return 0;

ras = adev->gfx.ras;

err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register gfx ras block!\n");
return err;
}

strcpy(ras->ras_block.ras_comm.name, "gfx");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gfx.ras_if = &ras->ras_block.ras_comm;

/* If not define special ras_late_init function, use gfx default ras_late_init */
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;

/* If not defined special ras_cb function, use default ras_cb */
if (!ras->ras_block.ras_cb)
ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;

return 0;
}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
return adev->gfx.ras->poison_consumption_handler(adev, entry);

return 0;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
@@ -210,6 +210,11 @@ struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
int (*poison_consumption_handler)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
};

struct amdgpu_gfx_funcs {
@@ -323,6 +328,7 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_inst_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
struct amdgpu_irq_src rlc_gc_fed_irq;
struct sq_work sq_work;

/* gfx status */
@@ -432,4 +438,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
#endif
@@ -497,6 +497,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
!--id_mgr->reserved_use_count) {
/* give the reserved ID back to normal round robin */
list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
id_mgr->reserved = NULL;
}
vm->reserved_vmid[vmhub] = false;
mutex_unlock(&id_mgr->lock);
@@ -161,8 +161,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;

/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
/* Check if any fences where initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
else if (job->hw_fence.ops)
f = &job->hw_fence;
else
f = NULL;

for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
#include "amd_pcie.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -767,6 +768,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
uint32_t pcie_gen_mask;
int ret;

dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -799,7 +801,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines;
dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info->_pad = 0;
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
@@ -853,6 +854,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;

ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1574,9 +1574,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
attachment = READ_ONCE(bo->tbo.base.import_attach);

if (attachment)
seq_printf(m, " imported from %p", dma_buf);
seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);
seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);

amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
@@ -336,7 +336,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,

if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
/* runtime db doesn't exist, exit */
dev_warn(adev->dev, "PSP runtime database doesn't exist\n");
dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
return false;
}
@@ -920,9 +920,6 @@ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_de
if (block >= AMDGPU_RAS_BLOCK__LAST)
return NULL;

if (!amdgpu_ras_is_supported(adev, block))
return NULL;

list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -1620,14 +1617,14 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
struct amdgpu_ras_block_object *block_obj =
amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

if (!block_obj || !block_obj->hw_ops)
if (!block_obj)
return;

/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
*/
if (block_obj->hw_ops->query_poison_status) {
if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
poison_stat = block_obj->hw_ops->query_poison_status(adev);
if (!poison_stat) {
/* Not poison consumption interrupt, no need to handle it */
@@ -1641,7 +1638,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (!adev->gmc.xgmi.connected_to_cpu)
amdgpu_umc_poison_handler(adev, false);

if (block_obj->hw_ops->handle_poison_consumption)
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

/* gpu reset is fallback for failed and default cases */
@@ -1649,6 +1646,8 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
block_obj->ras_comm.name);
amdgpu_ras_reset_gpu(adev);
} else {
amdgpu_gfx_poison_consumption_handler(adev, entry);
}
}
@@ -3023,11 +3022,26 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
unsigned int block)
{
int ret = 0;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

if (block >= AMDGPU_RAS_BLOCK_COUNT)
return 0;
return ras && (adev->ras_enabled & (1 << block));

ret = ras && (adev->ras_enabled & (1 << block));

/* For the special asic with mem ecc enabled but sram ecc
* not enabled, even if the ras block is not supported on
* .ras_enabled, if the asic supports poison mode and the
* ras block has ras configuration, it can be considered
* that the ras block supports ras function.
*/
if (!ret &&
amdgpu_ras_is_poison_mode_supported(adev) &&
amdgpu_ras_get_ras_block(adev, block, 0))
ret = 1;

return ret;
}

int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
@@ -305,3 +305,38 @@ void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
}
}
}

int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
{
int err = 0;
struct amdgpu_sdma_ras *ras = NULL;

/* adev->sdma.ras is NULL, which means sdma does not
* support ras function, then do nothing here.
*/
if (!adev->sdma.ras)
return 0;

ras = adev->sdma.ras;

err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register sdma ras block!\n");
return err;
}

strcpy(ras->ras_block.ras_comm.name, "sdma");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->sdma.ras_if = &ras->ras_block.ras_comm;

/* If not define special ras_late_init function, use default ras_late_init */
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;

/* If not defined special ras_cb function, use default ras_cb */
if (!ras->ras_block.ras_cb)
ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;

return 0;
}
@@ -129,5 +129,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);

#endif
@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,

TP_fast_assign(
__entry->bo_list = p->bo_list;
__entry->ring = to_amdgpu_ring(job->base.sched)->idx;
__entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
__entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
to_amdgpu_ring(job->base.sched));
to_amdgpu_ring(job->base.entity->rq->sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -1072,7 +1072,6 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
default:
return NULL;
}
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
if (adev->asic_type == CHIP_RAVEN) {
@@ -1087,6 +1086,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return "navi10";
case IP_VERSION(11, 0, 2):
return "vega20";
case IP_VERSION(11, 0, 3):
return "renoir";
case IP_VERSION(11, 0, 4):
return "arcturus";
case IP_VERSION(11, 0, 5):
@@ -1104,12 +1105,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
case IP_VERSION(11, 5, 0):
return "vangogh";
case IP_VERSION(12, 0, 1):
if (adev->asic_type == CHIP_RENOIR) {
if (adev->apu_flags & AMD_APU_IS_RENOIR)
return "renoir";
return "green_sardine";
}
break;
return "green_sardine";
case IP_VERSION(13, 0, 2):
return "aldebaran";
case IP_VERSION(13, 0, 1):
@@ -36,26 +36,26 @@
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -110,84 +110,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
case IP_VERSION(2, 5, 0):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 2, 0):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 6, 0):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 0, 0):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 0, 2):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 2):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 16):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 33):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 1, 1):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 1, 2):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 0):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 2):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
default:
return -EINVAL;
}
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;

hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -29,13 +29,16 @@
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

#include "amdgpu_reset.h"

#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218

static DEFINE_MUTEX(xgmi_mutex);
@@ -79,11 +82,27 @@ static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
};

static const int walf_pcs_err_status_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};

static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
{"XGMI PCS DataLossErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -162,6 +181,67 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
{"XGMI3X16 PCS DataLossErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
{"XGMI3X16 PCS TrainingErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
{"XGMI3X16 PCS FlowCtrlAckErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
{"XGMI3X16 PCS RxFifoUnderflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
{"XGMI3X16 PCS RxFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
{"XGMI3X16 PCS CRCErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
{"XGMI3X16 PCS BERExceededErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
{"XGMI3X16 PCS TxVcidDataErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
{"XGMI3X16 PCS ReplayBufParityErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
{"XGMI3X16 PCS DataParityErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
{"XGMI3X16 PCS ReplayFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
{"XGMI3X16 PCS ReplayFifoUnderflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
{"XGMI3X16 PCS ElasticFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
{"XGMI3X16 PCS DeskewErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
{"XGMI3X16 PCS FlowCtrlCRCErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
{"XGMI3X16 PCS DataStartupLimitErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
{"XGMI3X16 PCS FCInitTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
{"XGMI3X16 PCS RecoveryTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
{"XGMI3X16 PCS ReadySerialTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
{"XGMI3X16 PCS ReadySerialAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
{"XGMI3X16 PCS RecoveryAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
{"XGMI3X16 PCS RecoveryRelockAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
{"XGMI3X16 PCS ReplayAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
{"XGMI3X16 PCS SyncHdrErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
{"XGMI3X16 PCS TxReplayTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
{"XGMI3X16 PCS RxReplayTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
{"XGMI3X16 PCS LinkSubTxTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
{"XGMI3X16 PCS LinkSubRxTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
{"XGMI3X16 PCS RxCMDPktErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};

/**
* DOC: AMDGPU XGMI Support
*
@@ -809,39 +889,47 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
uint32_t value,
uint32_t mask_value,
uint32_t *ue_count,
uint32_t *ce_count,
bool is_xgmi_pcs)
bool is_xgmi_pcs,
bool check_mask)
{
int i;
int ue_cnt;
int ue_cnt = 0;
const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
uint32_t field_array_size = 0;

if (is_xgmi_pcs) {
/* query xgmi pcs error status,
* only ue is supported */
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) {
ue_cnt = (value &
xgmi_pcs_ras_fields[i].pcs_err_mask) >>
xgmi_pcs_ras_fields[i].pcs_err_shift;
if (ue_cnt) {
dev_info(adev->dev, "%s detected\n",
xgmi_pcs_ras_fields[i].err_name);
*ue_count += ue_cnt;
}
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
} else {
pcs_ras_fields = &xgmi_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
}
} else {
/* query wafl pcs error status,
* only ue is supported */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
ue_cnt = (value &
wafl_pcs_ras_fields[i].pcs_err_mask) >>
wafl_pcs_ras_fields[i].pcs_err_shift;
if (ue_cnt) {
dev_info(adev->dev, "%s detected\n",
wafl_pcs_ras_fields[i].err_name);
*ue_count += ue_cnt;
}
pcs_ras_fields = &wafl_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
}

if (check_mask)
value = value & ~mask_value;

/* query xgmi/walf pcs error status,
* only ue is supported */
for (i = 0; value && i < field_array_size; i++) {
ue_cnt = (value &
pcs_ras_fields[i].pcs_err_mask) >>
pcs_ras_fields[i].pcs_err_shift;
if (ue_cnt) {
dev_info(adev->dev, "%s detected\n",
pcs_ras_fields[i].err_name);
*ue_count += ue_cnt;
}

/* reset bit value if the bit is checked */
value &= ~(pcs_ras_fields[i].pcs_err_mask);
}

return 0;
@@ -852,7 +940,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
uint32_t data;
uint32_t data, mask_data = 0;
uint32_t ue_cnt = 0, ce_cnt = 0;

if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
@@ -867,15 +955,15 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, true);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, false);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_VEGA20:
@@ -883,31 +971,35 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, true);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, false);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_ALDEBARAN:
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
mask_data =
RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, true);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, true);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
mask_data =
RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev,
data, &ue_cnt, &ce_cnt, false);
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, true);
}
break;
default:
@@ -46,6 +46,7 @@
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
@@ -815,7 +816,14 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
break;
case IP_VERSION(11, 0, 3):
adev->gfx.ras = &gfx_v11_0_3_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1251,10 +1259,8 @@ static int gfx_v11_0_sw_init(void *handle)

switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -1262,6 +1268,15 @@ static int gfx_v11_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
default:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
@@ -1293,6 +1308,20 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;

/* ECC error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;

/* FED error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
&adev->gfx.rlc_gc_fed_irq);
if (r)
return r;

adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

if (adev->gfx.imu.funcs) {
@@ -1380,6 +1409,11 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;

if (amdgpu_gfx_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
return -EINVAL;
}

return 0;
}
@@ -4372,6 +4406,7 @@ static int gfx_v11_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;

amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -5803,6 +5838,36 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
}
}

#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
do { \
uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
WREG32_SOC15_IP(GC, reg_addr, tmp); \
} while (0)

static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t ecc_irq_state = 0;
uint32_t pipe0_int_cntl_addr = 0;
int i = 0;

ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;

pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);

WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);

for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
ecc_irq_state);

return 0;
}

static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -5979,6 +6044,16 @@ static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

return 0;
}

#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -6209,6 +6284,15 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
.set = gfx_v11_0_set_cp_ecc_error_state,
.process = amdgpu_gfx_cp_ecc_error_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -6219,6 +6303,13 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)

adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;

adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;

}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
@@ -0,0 +1,88 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#include "amdgpu.h"
#include "soc21.h"
#include "gc/gc_11_0_3_offset.h"
#include "gc/gc_11_0_3_sh_mask.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15.h"
#include "soc15d.h"
#include "gfx_v11_0.h"


static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t rlc_status0 = 0, rlc_status1 = 0;
struct ras_common_if *ras_if = NULL;
struct ras_dispatch_if ih_data = {
.entry = entry,
};

rlc_status0 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_0));
rlc_status1 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_1));

if (!rlc_status0 && !rlc_status1) {
dev_warn(adev->dev, "RLC_GC_FED irq is generated, but rlc_status0 and rlc_status1 are empty!\n");
return 0;
}

/* Use RLC_RLCS_FED_STATUS_0/1 to distinguish FED error block. */
if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) ||
REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR))
ras_if = adev->sdma.ras_if;
else
ras_if = adev->gfx.ras_if;

if (!ras_if) {
dev_err(adev->dev, "Gfx or sdma ras block not initialized, rlc_status0:0x%x.\n",
rlc_status0);
return -EINVAL;
}

ih_data.head = *ras_if;

dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name);
amdgpu_ras_interrupt_dispatch(adev, &ih_data);

return 0;
}

static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
/* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */
if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
(entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
!entry->vmid && !entry->pasid)
amdgpu_ras_reset_gpu(adev);

return 0;
}

struct amdgpu_gfx_ras gfx_v11_0_3_ras = {
.rlc_gc_fed_irq = gfx_v11_0_3_rlc_gc_fed_irq,
.poison_consumption_handler = gfx_v11_0_3_poison_consumption_handler,
};
@@ -0,0 +1,29 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#ifndef __GFX_V11_0_3_H__
#define __GFX_V11_0_3_H__

extern struct amdgpu_gfx_ras gfx_v11_0_3_ras;

#endif
@@ -1003,7 +1003,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
@@ -1345,7 +1345,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,

err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
return err;
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
@@ -1355,13 +1355,14 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);

/* ignore failures to load */
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
} else {
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
@@ -1370,10 +1371,10 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,

gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {

out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
@@ -1935,27 +1936,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
break;
}

if (adev->gfx.ras) {
err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
if (err) {
DRM_ERROR("Failed to register gfx ras block!\n");
return err;
}

strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;

/* If not define special ras_late_init function, use gfx default ras_late_init */
if (!adev->gfx.ras->ras_block.ras_late_init)
adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

/* If not defined special ras_cb function, use default ras_cb */
if (!adev->gfx.ras->ras_block.ras_cb)
adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
}

adev->gfx.config.gb_addr_config = gb_addr_config;

adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
@@ -2197,6 +2177,11 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;

if (amdgpu_gfx_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
return -EINVAL;
}

return 0;
}
@@ -169,23 +169,23 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;

if (!amdgpu_sriov_vf(adev)) {
/*
* the new L1 policy will block SRIOV guest from writing
* these regs, and they will be programed at host.
* so skip programing these regs.
*/
/* Disable AGP. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
if (amdgpu_sriov_vf(adev))
return;

/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 18);
}
/*
* the new L1 policy will block SRIOV guest from writing
* these regs, and they will be programed at host.
* so skip programing these regs.
*/
/* Disable AGP. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 18);

/* Set default page address. */
value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
@ -98,7 +98,7 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
|
|||
};
|
||||
|
||||
/* Sienna Cichlid */
|
||||
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
|
||||
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
|
||||
|
@ -110,10 +110,27 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
|
|||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs sc_video_codecs_decode =
|
||||
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
|
||||
.codec_array = sc_video_codecs_decode_array,
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0),
|
||||
.codec_array = sc_video_codecs_decode_array_vcn0,
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1),
|
||||
.codec_array = sc_video_codecs_decode_array_vcn1,
|
||||
};
|
||||
|
||||
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
|
||||
|
@ -123,7 +140,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
|
|||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
|
||||
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
|
||||
|
@ -135,16 +152,33 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
|
|||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
|
||||
.codec_array = sriov_sc_video_codecs_encode_array,
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
|
||||
static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
|
||||
.codec_array = sriov_sc_video_codecs_decode_array,
|
||||
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0),
|
||||
.codec_array = sriov_sc_video_codecs_decode_array_vcn0,
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1),
|
||||
.codec_array = sriov_sc_video_codecs_decode_array_vcn1,
|
||||
};
|
||||
|
||||
/* Beige Goby*/
|
||||
|
@@ -181,20 +215,37 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
return -EINVAL;

switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
else
*codecs = &sriov_sc_video_codecs_decode;
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
else
*codecs = &sriov_sc_video_codecs_decode_vcn1;
} else {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
else
*codecs = &sriov_sc_video_codecs_decode_vcn0;
}
} else {
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn1;
} else {
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
}
}
return 0;
case IP_VERSION(3, 0, 16):
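nv_query_video_codecs() now bails out when every VCN instance is harvested and otherwise picks the VCN0 or VCN1 table from the harvest mask. A standalone model of that selection logic, assuming simplified harvest bits and table names (not the driver's actual symbols):

```c
#include <stdio.h>

#define HARVEST_VCN0 0x1u
#define HARVEST_VCN1 0x2u

/* count set bits, standing in for the kernel's hweight8() */
static int popcount8(unsigned int v)
{
	int n = 0;
	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

/* returns the table name to use, or NULL if no usable VCN instance */
static const char *pick_decode_table(int num_vcn_inst, unsigned int harvest)
{
	if (num_vcn_inst == popcount8(harvest))
		return NULL;            /* every instance harvested: -EINVAL */
	if (harvest & HARVEST_VCN0)
		return "decode_vcn1";   /* VCN0 gone: fall back to the AV1-less table */
	return "decode_vcn0";
}

int main(void)
{
	printf("%s\n", pick_decode_table(2, 0));             /* decode_vcn0 */
	printf("%s\n", pick_decode_table(2, HARVEST_VCN0));  /* decode_vcn1 */
	printf("%s\n", pick_decode_table(2, HARVEST_VCN0 | HARVEST_VCN1) ?
	       "table" : "EINVAL");                          /* EINVAL */
	return 0;
}
```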
@ -202,7 +253,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
|
|||
if (encode)
|
||||
*codecs = &nv_video_codecs_encode;
|
||||
else
|
||||
*codecs = &sc_video_codecs_decode;
|
||||
*codecs = &sc_video_codecs_decode_vcn0;
|
||||
return 0;
|
||||
case IP_VERSION(3, 1, 1):
|
||||
case IP_VERSION(3, 1, 2):
|
||||
|
@ -993,9 +1044,19 @@ static int nv_common_late_init(void *handle)
|
|||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
xgpu_nv_mailbox_get_irq(adev);
|
||||
amdgpu_virt_update_sriov_video_codec(adev,
|
||||
sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
|
||||
sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
|
||||
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
|
||||
amdgpu_virt_update_sriov_video_codec(adev,
|
||||
sriov_sc_video_codecs_encode_array,
|
||||
ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
|
||||
sriov_sc_video_codecs_decode_array_vcn1,
|
||||
ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
|
||||
} else {
|
||||
amdgpu_virt_update_sriov_video_codec(adev,
|
||||
sriov_sc_video_codecs_encode_array,
|
||||
ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
|
||||
sriov_sc_video_codecs_decode_array_vcn1,
|
||||
ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -1851,6 +1851,11 @@ static int sdma_v4_0_sw_init(void *handle)
}
}

if (amdgpu_sdma_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
return -EINVAL;
}

return r;
}
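The sw_init change funnels RAS registration through one helper and treats its failure as fatal. A hedged sketch of that call-and-bail pattern with stand-in names (fake_ras_sw_init is not a real amdgpu function):

```c
#include <stdio.h>

/* stand-in for the shared helper; the real amdgpu_sdma_ras_sw_init()
 * registers the RAS block and fills in default callbacks */
static int fake_ras_sw_init(void *adev)
{
	(void)adev;
	return 0; /* 0 on success, non-zero on failure */
}

static int sw_init(void *adev)
{
	int r = 0;

	/* ...ring and IRQ setup would happen here... */

	if (fake_ras_sw_init(adev)) {
		fprintf(stderr, "Failed to initialize sdma ras block!\n");
		return -22; /* -EINVAL */
	}

	return r;
}

int main(void)
{
	return sw_init(NULL) ? 1 : 0;
}
```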
|
||||
|
@ -2688,22 +2693,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
|
|||
break;
|
||||
}
|
||||
|
||||
if (adev->sdma.ras) {
|
||||
amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
|
||||
|
||||
strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
|
||||
adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
|
||||
adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;
|
||||
|
||||
/* If don't define special ras_late_init function, use default ras_late_init */
|
||||
if (!adev->sdma.ras->ras_block.ras_late_init)
|
||||
adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
|
||||
|
||||
/* If not defined special ras_cb function, use default ras_cb */
|
||||
if (!adev->sdma.ras->ras_block.ras_cb)
|
||||
adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
|
||||
}
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
|
||||
|
|
|
@ -1211,6 +1211,24 @@ static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
|
|||
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
|
||||
}
|
||||
|
||||
static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {
|
||||
.ras_block = {
|
||||
.ras_late_init = amdgpu_ras_block_late_init,
|
||||
},
|
||||
};
|
||||
|
||||
static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (adev->ip_versions[SDMA0_HWIP][0]) {
|
||||
case IP_VERSION(6, 0, 3):
|
||||
adev->sdma.ras = &sdma_v6_0_3_ras;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static int sdma_v6_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
@ -1220,6 +1238,7 @@ static int sdma_v6_0_early_init(void *handle)
|
|||
sdma_v6_0_set_vm_pte_funcs(adev);
|
||||
sdma_v6_0_set_irq_funcs(adev);
|
||||
sdma_v6_0_set_mqd_funcs(adev);
|
||||
sdma_v6_0_set_ras_funcs(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1264,6 +1283,11 @@ static int sdma_v6_0_sw_init(void *handle)
|
|||
return r;
|
||||
}
|
||||
|
||||
if (amdgpu_sdma_ras_sw_init(adev)) {
|
||||
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@@ -1403,10 +1427,12 @@ static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,

u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);

sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32(reg_offset, sdma_cntl);
if (!amdgpu_sriov_vf(adev)) {
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32(reg_offset, sdma_cntl);
}

return 0;
}
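The trap-IRQ change skips the SDMA0_CNTL read-modify-write entirely on SR-IOV guests, presumably because the host owns that register under virtualization. A standalone sketch of the guarded read-modify-write, with a fake register and placeholder field masks:

```c
#include <stdio.h>
#include <stdbool.h>

#define TRAP_ENABLE_MASK  0x1u
#define TRAP_ENABLE_SHIFT 0

static unsigned int fake_reg; /* stand-in for SDMA0_CNTL */

static unsigned int rreg32(void) { return fake_reg; }
static void wreg32(unsigned int v) { fake_reg = v; }

static int set_trap_irq_state(bool is_sriov_vf, bool enable)
{
	if (!is_sriov_vf) {
		unsigned int v = rreg32();

		/* read-modify-write of the TRAP_ENABLE field */
		v &= ~(TRAP_ENABLE_MASK << TRAP_ENABLE_SHIFT);
		v |= (enable ? 1u : 0u) << TRAP_ENABLE_SHIFT;
		wreg32(v);
	}
	return 0;
}

int main(void)
{
	set_trap_irq_state(false, true);
	printf("bare metal: reg=0x%x\n", fake_reg);           /* 0x1 */
	set_trap_irq_state(true, false);
	printf("SR-IOV VF:  reg=0x%x (untouched)\n", fake_reg);
	return 0;
}
```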
|
|
|
@ -48,19 +48,31 @@
|
|||
static const struct amd_ip_funcs soc21_common_ip_funcs;
|
||||
|
||||
/* SOC21 */
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
|
||||
.codec_array = vcn_4_0_0_video_codecs_encode_array,
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
|
||||
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
|
||||
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
|
||||
|
@ -69,23 +81,46 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
|
|||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
|
||||
.codec_array = vcn_4_0_0_video_codecs_decode_array,
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
|
||||
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
|
||||
{
|
||||
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
|
||||
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
|
||||
};
|
||||
|
||||
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
|
||||
const struct amdgpu_video_codecs **codecs)
|
||||
{
|
||||
switch (adev->ip_versions[UVD_HWIP][0]) {
|
||||
if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
|
||||
return -EINVAL;
|
||||
|
||||
switch (adev->ip_versions[UVD_HWIP][0]) {
|
||||
case IP_VERSION(4, 0, 0):
|
||||
case IP_VERSION(4, 0, 2):
|
||||
if (encode)
|
||||
*codecs = &vcn_4_0_0_video_codecs_encode;
|
||||
else
|
||||
*codecs = &vcn_4_0_0_video_codecs_decode;
|
||||
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
|
||||
if (encode)
|
||||
*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
|
||||
else
|
||||
*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
|
||||
} else {
|
||||
if (encode)
|
||||
*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
|
||||
else
|
||||
*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
|
||||
}
|
||||
return 0;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
|
|
@ -57,13 +57,6 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
|
|||
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
|
||||
}
|
||||
|
||||
static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
|
||||
uint32_t umc_inst,
|
||||
uint32_t ch_inst)
|
||||
{
|
||||
return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
|
||||
}
|
||||
|
||||
static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
|
||||
uint64_t mc_umc_status, uint32_t umc_reg_offset)
|
||||
{
|
||||
|
|
|
@ -1771,6 +1771,10 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
|
|||
if (atomic_read(&job->base.entity->fence_seq))
|
||||
return -EINVAL;
|
||||
|
||||
/* if VCN0 is harvested, we can't support AV1 */
|
||||
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
|
||||
return -EINVAL;
|
||||
|
||||
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
|
||||
[AMDGPU_RING_PRIO_DEFAULT].sched;
|
||||
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
|
||||
|
|
|
@ -1632,6 +1632,10 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
|
|||
if (atomic_read(&job->base.entity->fence_seq))
|
||||
return -EINVAL;
|
||||
|
||||
/* if VCN0 is harvested, we can't support AV1 */
|
||||
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
|
||||
return -EINVAL;
|
||||
|
||||
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
|
||||
[AMDGPU_RING_PRIO_0].sched;
|
||||
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
|
||||
|
|
|
@@ -59,30 +59,27 @@ static int update_qpd_v9(struct device_queue_manager *dqm,

/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
/* Aldebaran can safely support different XNACK modes
* per process
*/
if (!pdd->process->xnack_enabled)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
} else if (dqm->dev->noretry &&
!dqm->dev->use_iommu_v2) {
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
}
if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;

qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}

if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
if (!pdd->process->xnack_enabled)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
else
qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
}

qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);

pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
qpd->sh_mem_config);

return 0;
}
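With the rework, the RETRY_DISABLE bit is recomputed on every call when per-process XNACK is supported: set while XNACK is disabled for the process, cleared otherwise. A minimal sketch of that bit handling, using a placeholder shift rather than the real SH_MEM_CONFIG layout:

```c
#include <stdio.h>
#include <stdbool.h>

#define RETRY_DISABLE_SHIFT 3 /* placeholder shift, not the real register layout */

static unsigned int update_retry_disable(unsigned int sh_mem_config,
					 bool xnack_per_process_supported,
					 bool xnack_enabled)
{
	if (xnack_per_process_supported) {
		if (!xnack_enabled)
			sh_mem_config |= 1u << RETRY_DISABLE_SHIFT;
		else
			sh_mem_config &= ~(1u << RETRY_DISABLE_SHIFT);
	}
	return sh_mem_config;
}

int main(void)
{
	unsigned int cfg = 0;

	cfg = update_retry_disable(cfg, true, false);
	printf("xnack off: 0x%x\n", cfg); /* bit set */
	cfg = update_retry_disable(cfg, true, true);
	printf("xnack on:  0x%x\n", cfg); /* bit cleared */
	return 0;
}
```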
|
|
|
@@ -206,6 +206,8 @@ enum cache_policy {

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))

struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
|
|
|
@@ -1330,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
continue;

/* GFXv10 and later GPUs do not support shader preemption
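Wrapping the GC-version comparison in KFD_SUPPORT_XNACK_PER_PROCESS() keeps the policy in one place, so future ASICs only need the macro updated. A toy illustration of the same idea with stand-in macros:

```c
#include <stdio.h>

/* toy version-code helper and capability macro; the real code keys off
 * KFD_GC_VERSION()/IP_VERSION() instead of these stand-ins */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))
#define SUPPORTS_XNACK_PER_PROCESS(ver) ((ver) == IP_VERSION(9, 4, 2))

int main(void)
{
	unsigned int gc[] = { IP_VERSION(9, 4, 2), IP_VERSION(10, 3, 0) };

	for (unsigned int i = 0; i < 2; i++)
		printf("gc 0x%06x: per-process XNACK %s\n", gc[i],
		       SUPPORTS_XNACK_PER_PROCESS(gc[i]) ? "yes" : "no");
	return 0;
}
```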
|
|
|
@ -66,7 +66,6 @@
|
|||
|
||||
#include "ivsrcid/ivsrcid_vislands30.h"
|
||||
|
||||
#include "i2caux_interface.h"
|
||||
#include <linux/backlight.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
|
@ -1504,8 +1503,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
|||
case IP_VERSION(3, 0, 1):
|
||||
case IP_VERSION(3, 1, 2):
|
||||
case IP_VERSION(3, 1, 3):
|
||||
case IP_VERSION(3, 1, 4):
|
||||
case IP_VERSION(3, 1, 5):
|
||||
case IP_VERSION(3, 1, 6):
|
||||
init_data.flags.gpu_vm_support = true;
|
||||
break;
|
||||
|
@ -1734,15 +1731,11 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
|
|||
adev->dm.vblank_control_workqueue = NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < adev->dm.display_indexes_num; i++) {
|
||||
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
|
||||
}
|
||||
|
||||
amdgpu_dm_destroy_drm_device(&adev->dm);
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
||||
if (adev->dm.secure_display_ctxs) {
|
||||
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
if (adev->dm.secure_display_ctxs[i].crtc) {
|
||||
flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
|
||||
flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
|
||||
|
@ -1949,10 +1942,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
|||
dmub_asic = DMUB_ASIC_DCN21;
|
||||
break;
|
||||
case IP_VERSION(3, 0, 0):
|
||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
|
||||
dmub_asic = DMUB_ASIC_DCN30;
|
||||
else
|
||||
dmub_asic = DMUB_ASIC_DCN30;
|
||||
dmub_asic = DMUB_ASIC_DCN30;
|
||||
break;
|
||||
case IP_VERSION(3, 0, 1):
|
||||
dmub_asic = DMUB_ASIC_DCN301;
|
||||
|
@ -5342,8 +5332,6 @@ static void fill_stream_properties_from_drm_display_mode(
|
|||
|
||||
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
|
||||
|
||||
stream->output_color_space = get_output_color_space(timing_out);
|
||||
|
||||
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
|
||||
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
|
||||
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
|
||||
|
@ -5354,6 +5342,8 @@ static void fill_stream_properties_from_drm_display_mode(
|
|||
adjust_colour_depth_from_display_info(timing_out, info);
|
||||
}
|
||||
}
|
||||
|
||||
stream->output_color_space = get_output_color_space(timing_out);
|
||||
}
|
||||
|
||||
static void fill_audio_info(struct audio_info *audio_info,
|
||||
|
@ -9685,8 +9675,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
if (dm_old_con_state->abm_level !=
|
||||
dm_new_con_state->abm_level)
|
||||
if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
|
||||
dm_old_con_state->scaling != dm_new_con_state->scaling)
|
||||
new_crtc_state->connectors_changed = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -223,7 +223,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
|
|||
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
|
||||
/* Disable secure_display if it was enabled */
|
||||
if (!enable) {
|
||||
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
|
||||
for (i = 0; i < adev->mode_info.num_crtc; i++) {
|
||||
if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
|
||||
/* stop ROI update on this crtc */
|
||||
flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
|
||||
|
@@ -544,12 +544,14 @@ amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
struct secure_display_context *secure_display_ctxs = NULL;
int i;

secure_display_ctxs = kcalloc(AMDGPU_MAX_CRTCS, sizeof(struct secure_display_context), GFP_KERNEL);
secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
sizeof(struct secure_display_context),
GFP_KERNEL);

if (!secure_display_ctxs)
return NULL;

for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
for (i = 0; i < adev->mode_info.num_crtc; i++) {
INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
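The allocation is now sized by the number of CRTCs actually present, and the initialisation loop uses the same bound, so the two can never drift apart. A standalone userspace sketch of that pattern using calloc():

```c
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int crtc_index;
	char name[16];
};

/* allocate one context per CRTC actually present, and make the
 * initialisation loop use the same bound as the allocation */
static struct ctx *create_contexts(int num_crtc)
{
	struct ctx *ctxs = calloc(num_crtc, sizeof(*ctxs));

	if (!ctxs)
		return NULL;

	for (int i = 0; i < num_crtc; i++) {
		ctxs[i].crtc_index = i;
		snprintf(ctxs[i].name, sizeof(ctxs[i].name), "crtc-%d", i);
	}
	return ctxs;
}

int main(void)
{
	int num_crtc = 4;
	struct ctx *ctxs = create_contexts(num_crtc);

	if (!ctxs)
		return 1;
	printf("last ctx: %s\n", ctxs[num_crtc - 1].name);
	free(ctxs);
	return 0;
}
```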
|
|
|
@@ -105,8 +105,7 @@ static void vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;

dc_allow_idle_optimizations(
dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
|
|
|
@ -35,6 +35,7 @@
|
|||
#include "resource.h"
|
||||
#include "dsc.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "dc_link.h"
|
||||
#include "link_hwss.h"
|
||||
#include "dc/dc_dmub_srv.h"
|
||||
|
||||
|
@ -3395,7 +3396,7 @@ static int trigger_hpd_mst_set(void *data, u64 val)
|
|||
continue;
|
||||
|
||||
link = aconnector->dc_link;
|
||||
dp_receiver_power_ctrl(link, false);
|
||||
dc_link_dp_receiver_power_ctrl(link, false);
|
||||
drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
|
||||
link->mst_stream_alloc_table.stream_count = 0;
|
||||
memset(link->mst_stream_alloc_table.stream_allocations, 0,
|
||||
|
|
|
@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
|
|||
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
|
||||
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
|
||||
struct mod_hdcp_display_query query;
|
||||
unsigned int conn_index = aconnector->base.index;
|
||||
|
||||
mutex_lock(&hdcp_w->mutex);
|
||||
hdcp_w->aconnector = aconnector;
|
||||
hdcp_w->aconnector[conn_index] = aconnector;
|
||||
|
||||
query.display = NULL;
|
||||
mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
|
||||
|
@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
|
|||
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
|
||||
} else {
|
||||
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
|
||||
hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
cancel_delayed_work(&hdcp_w->property_validate_dwork);
|
||||
}
|
||||
|
||||
|
@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
|
|||
{
|
||||
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
|
||||
struct drm_connector_state *conn_state = aconnector->base.state;
|
||||
unsigned int conn_index = aconnector->base.index;
|
||||
|
||||
mutex_lock(&hdcp_w->mutex);
|
||||
hdcp_w->aconnector = aconnector;
|
||||
hdcp_w->aconnector[conn_index] = aconnector;
|
||||
|
||||
/* the removal of display will invoke auth reset -> hdcp destroy and
|
||||
* we'd expect the Content Protection (CP) property changed back to
|
||||
|
@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
|
|||
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
|
||||
{
|
||||
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
|
||||
unsigned int conn_index;
|
||||
|
||||
mutex_lock(&hdcp_w->mutex);
|
||||
|
||||
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
|
||||
|
||||
cancel_delayed_work(&hdcp_w->property_validate_dwork);
|
||||
hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
|
||||
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
|
||||
hdcp_w->encryption_status[conn_index] =
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
}
|
||||
|
||||
process_output(hdcp_w);
|
||||
|
||||
|
@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work)
|
|||
|
||||
|
||||
}
|
||||
|
||||
static void event_property_update(struct work_struct *work)
|
||||
{
|
||||
|
||||
struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
|
||||
struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
|
||||
struct drm_device *dev = hdcp_work->aconnector->base.dev;
|
||||
struct amdgpu_dm_connector *aconnector = NULL;
|
||||
struct drm_device *dev;
|
||||
long ret;
|
||||
unsigned int conn_index;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
|
||||
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
|
||||
mutex_lock(&hdcp_work->mutex);
|
||||
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
|
||||
aconnector = hdcp_work->aconnector[conn_index];
|
||||
|
||||
if (!aconnector)
|
||||
continue;
|
||||
|
||||
if (aconnector->base.state && aconnector->base.state->commit) {
|
||||
ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
|
||||
connector = &aconnector->base;
|
||||
|
||||
if (ret == 0) {
|
||||
DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
|
||||
hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
/* check if display connected */
|
||||
if (connector->status != connector_status_connected)
|
||||
continue;
|
||||
|
||||
conn_state = aconnector->base.state;
|
||||
|
||||
if (!conn_state)
|
||||
continue;
|
||||
|
||||
dev = connector->dev;
|
||||
|
||||
if (!dev)
|
||||
continue;
|
||||
|
||||
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
|
||||
mutex_lock(&hdcp_work->mutex);
|
||||
|
||||
if (conn_state->commit) {
|
||||
ret = wait_for_completion_interruptible_timeout(
|
||||
&conn_state->commit->hw_done, 10 * HZ);
|
||||
if (ret == 0) {
|
||||
DRM_ERROR(
|
||||
"HDCP state unknown! Setting it to DESIRED");
|
||||
hdcp_work->encryption_status[conn_index] =
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (aconnector->base.state) {
|
||||
if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
|
||||
if (aconnector->base.state->hdcp_content_type ==
|
||||
if (hdcp_work->encryption_status[conn_index] !=
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
|
||||
if (conn_state->hdcp_content_type ==
|
||||
DRM_MODE_HDCP_CONTENT_TYPE0 &&
|
||||
hdcp_work->encryption_status <=
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
|
||||
drm_hdcp_update_content_protection(&aconnector->base,
|
||||
DRM_MODE_CONTENT_PROTECTION_ENABLED);
|
||||
else if (aconnector->base.state->hdcp_content_type ==
|
||||
DRM_MODE_HDCP_CONTENT_TYPE1 &&
|
||||
hdcp_work->encryption_status ==
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
|
||||
drm_hdcp_update_content_protection(&aconnector->base,
|
||||
DRM_MODE_CONTENT_PROTECTION_ENABLED);
|
||||
} else {
|
||||
drm_hdcp_update_content_protection(&aconnector->base,
|
||||
DRM_MODE_CONTENT_PROTECTION_DESIRED);
|
||||
}
|
||||
}
|
||||
hdcp_work->encryption_status[conn_index] <=
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
|
||||
|
||||
mutex_unlock(&hdcp_work->mutex);
|
||||
drm_modeset_unlock(&dev->mode_config.connection_mutex);
|
||||
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
|
||||
drm_hdcp_update_content_protection(
|
||||
connector,
|
||||
DRM_MODE_CONTENT_PROTECTION_ENABLED);
|
||||
} else if (conn_state->hdcp_content_type ==
|
||||
DRM_MODE_HDCP_CONTENT_TYPE1 &&
|
||||
hdcp_work->encryption_status[conn_index] ==
|
||||
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
|
||||
drm_hdcp_update_content_protection(
|
||||
connector,
|
||||
DRM_MODE_CONTENT_PROTECTION_ENABLED);
|
||||
}
|
||||
} else {
|
||||
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
|
||||
drm_hdcp_update_content_protection(
|
||||
connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
|
||||
|
||||
}
|
||||
mutex_unlock(&hdcp_work->mutex);
|
||||
drm_modeset_unlock(&dev->mode_config.connection_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static void event_property_validate(struct work_struct *work)
|
||||
|
@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work)
|
|||
struct hdcp_workqueue *hdcp_work =
|
||||
container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
|
||||
struct mod_hdcp_display_query query;
|
||||
struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
|
||||
|
||||
if (!aconnector)
|
||||
return;
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
unsigned int conn_index;
|
||||
|
||||
mutex_lock(&hdcp_work->mutex);
|
||||
|
||||
query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
|
||||
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
|
||||
conn_index++) {
|
||||
aconnector = hdcp_work->aconnector[conn_index];
|
||||
|
||||
if (query.encryption_status != hdcp_work->encryption_status) {
|
||||
hdcp_work->encryption_status = query.encryption_status;
|
||||
schedule_work(&hdcp_work->property_update_work);
|
||||
if (!aconnector)
|
||||
continue;
|
||||
|
||||
/* check if display connected */
|
||||
if (aconnector->base.status != connector_status_connected)
|
||||
continue;
|
||||
|
||||
if (!aconnector->base.state)
|
||||
continue;
|
||||
|
||||
query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
|
||||
mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index,
|
||||
&query);
|
||||
|
||||
DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
|
||||
aconnector->base.index,
|
||||
aconnector->base.state->content_protection,
|
||||
query.encryption_status,
|
||||
hdcp_work->encryption_status[conn_index]);
|
||||
|
||||
if (query.encryption_status !=
|
||||
hdcp_work->encryption_status[conn_index]) {
|
||||
DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
|
||||
hdcp_work->encryption_status[conn_index], query.encryption_status);
|
||||
|
||||
hdcp_work->encryption_status[conn_index] =
|
||||
query.encryption_status;
|
||||
|
||||
DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n");
|
||||
|
||||
schedule_work(&hdcp_work->property_update_work);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&hdcp_work->mutex);
|
||||
|
@ -686,6 +752,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
|
|||
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
|
||||
hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
|
||||
hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
|
||||
|
||||
memset(hdcp_work[i].aconnector, 0,
|
||||
sizeof(struct amdgpu_dm_connector *) *
|
||||
AMDGPU_DM_MAX_DISPLAY_INDEX);
|
||||
memset(hdcp_work[i].encryption_status, 0,
|
||||
sizeof(enum mod_hdcp_encryption_status) *
|
||||
AMDGPU_DM_MAX_DISPLAY_INDEX);
|
||||
}
|
||||
|
||||
cp_psp->funcs.update_stream_config = update_config;
|
||||
|
|
|
@@ -43,7 +43,7 @@ struct hdcp_workqueue {
struct delayed_work callback_dwork;
struct delayed_work watchdog_timer_dwork;
struct delayed_work property_validate_dwork;
struct amdgpu_dm_connector *aconnector;
struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];
struct mutex mutex;

struct mod_hdcp hdcp;

@@ -51,8 +51,7 @@ struct hdcp_workqueue {
struct mod_hdcp_display display;
struct mod_hdcp_link link;

enum mod_hdcp_encryption_status encryption_status;

enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX];
/* when display is unplugged from mst hub, connctor will be
* destroyed within dm_dp_mst_connector_destroy. connector
* hdcp perperties, like type, undesired, desired, enabled,
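Promoting aconnector and encryption_status to per-display-index arrays lets one workqueue entry track several connectors on the same link (e.g. an MST hub). A simplified standalone sketch of the array-per-index layout and a reset loop, with made-up sizes and types:

```c
#include <stdio.h>
#include <stddef.h>

#define MAX_DISPLAY_INDEX 6 /* stand-in for AMDGPU_DM_MAX_DISPLAY_INDEX */

enum enc_status { ENC_OFF, ENC_TYPE0_ON, ENC_TYPE1_ON };

struct connector {
	int index;
};

/* one slot per possible display index instead of a single pointer,
 * so several connectors hanging off the same link can be tracked */
struct hdcp_work {
	struct connector *aconnector[MAX_DISPLAY_INDEX];
	enum enc_status encryption_status[MAX_DISPLAY_INDEX];
};

static void reset_link(struct hdcp_work *w)
{
	for (size_t i = 0; i < MAX_DISPLAY_INDEX; i++)
		w->encryption_status[i] = ENC_OFF;
}

int main(void)
{
	struct hdcp_work w = { { 0 } };
	struct connector c = { .index = 2 };

	w.aconnector[c.index] = &c;
	w.encryption_status[c.index] = ENC_TYPE0_ON;
	reset_link(&w);
	printf("connector %d status after reset: %d\n",
	       c.index, w.encryption_status[c.index]);
	return 0;
}
```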
|
|
|
@ -39,12 +39,10 @@
|
|||
#include "dc.h"
|
||||
#include "dm_helpers.h"
|
||||
|
||||
#include "dc_link_ddc.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "ddc_service_types.h"
|
||||
#include "dpcd_defs.h"
|
||||
|
||||
#include "i2caux_interface.h"
|
||||
#include "dmub_cmd.h"
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
#include "amdgpu_dm_debugfs.h"
|
||||
|
@ -494,7 +492,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
|
|||
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(encoder);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
|
||||
|
|
|
@ -64,9 +64,9 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
|
|||
|
||||
include $(AMD_DC)
|
||||
|
||||
DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
|
||||
dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
|
||||
dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
|
||||
DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
|
||||
dc_surface.o dc_link_dp.o dc_debug.o dc_stream.o \
|
||||
dc_link_enc_cfg.o
|
||||
|
||||
DISPLAY_CORE += dc_vm_helper.o
|
||||
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include "include/gpio_service_interface.h"
|
||||
#include "include/grph_object_ctrl_defs.h"
|
||||
#include "include/bios_parser_interface.h"
|
||||
#include "include/i2caux_interface.h"
|
||||
#include "include/logger_interface.h"
|
||||
|
||||
#include "command_table.h"
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
#include "dc_bios_types.h"
|
||||
#include "include/grph_object_ctrl_defs.h"
|
||||
#include "include/bios_parser_interface.h"
|
||||
#include "include/i2caux_interface.h"
|
||||
#include "include/logger_interface.h"
|
||||
|
||||
#include "command_table2.h"
|
||||
|
@ -1698,14 +1697,15 @@ static enum bp_result bios_parser_enable_disp_power_gating(
|
|||
static enum bp_result bios_parser_enable_lvtma_control(
|
||||
struct dc_bios *dcb,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance)
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
|
||||
if (!bp->cmd_tbl.enable_lvtma_control)
|
||||
return BP_RESULT_FAILURE;
|
||||
|
||||
return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance);
|
||||
return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
|
||||
}
|
||||
|
||||
static bool bios_parser_is_accelerated_mode(
|
||||
|
@ -2929,7 +2929,6 @@ static enum bp_result construct_integrated_info(
|
|||
struct atom_common_table_header *header;
|
||||
struct atom_data_revision revision;
|
||||
|
||||
struct clock_voltage_caps temp = {0, 0};
|
||||
uint32_t i;
|
||||
uint32_t j;
|
||||
|
||||
|
@@ -3032,14 +3031,8 @@ static enum bp_result construct_integrated_info(
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
for (j = i; j > 0; --j) {
if (info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
temp = info->disp_clk_voltage[j-1];
info->disp_clk_voltage[j-1] =
info->disp_clk_voltage[j];
info->disp_clk_voltage[j] = temp;
}
info->disp_clk_voltage[j-1].max_supported_clk)
swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]);
}
}
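The sort body collapses to the kernel's swap() helper. The same insertion-sort shape as a tiny standalone program, with a local swap macro standing in for the kernel one (uses the GCC typeof extension):

```c
#include <stdio.h>

/* local stand-in for the kernel's swap() helper (GCC typeof extension) */
#define swap(a, b) do { typeof(a) _t = (a); (a) = (b); (b) = _t; } while (0)

#define N 5

int main(void)
{
	int clk[N] = { 600, 300, 900, 150, 450 };

	/* same insertion-sort shape as construct_integrated_info() */
	for (int i = 1; i < N; ++i)
		for (int j = i; j > 0; --j)
			if (clk[j] < clk[j - 1])
				swap(clk[j], clk[j - 1]);

	for (int i = 0; i < N; i++)
		printf("%d ", clk[i]);
	printf("\n");
	return 0;
}
```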
|
||||
|
|
|
@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
|
|||
static enum bp_result enable_lvtma_control(
|
||||
struct bios_parser *bp,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance);
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait);
|
||||
|
||||
static void init_enable_lvtma_control(struct bios_parser *bp)
|
||||
{
|
||||
|
@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
|
|||
static void enable_lvtma_control_dmcub(
|
||||
struct dc_dmub_srv *dmcub,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance)
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait)
|
||||
{
|
||||
|
||||
union dmub_rb_cmd cmd;
|
||||
|
@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub(
|
|||
uc_pwr_on;
|
||||
cmd.lvtma_control.data.panel_inst =
|
||||
panel_instance;
|
||||
cmd.lvtma_control.data.bypass_panel_control_wait =
|
||||
bypass_panel_control_wait;
|
||||
dc_dmub_srv_cmd_queue(dmcub, &cmd);
|
||||
dc_dmub_srv_cmd_execute(dmcub);
|
||||
dc_dmub_srv_wait_idle(dmcub);
|
||||
|
@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub(
|
|||
static enum bp_result enable_lvtma_control(
|
||||
struct bios_parser *bp,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance)
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
|
||||
|
@ -1029,7 +1034,8 @@ static enum bp_result enable_lvtma_control(
|
|||
bp->base.ctx->dc->debug.dmub_command_table) {
|
||||
enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
|
||||
uc_pwr_on,
|
||||
panel_instance);
|
||||
panel_instance,
|
||||
bypass_panel_control_wait);
|
||||
return BP_RESULT_OK;
|
||||
}
|
||||
return result;
|
||||
|
|
|
@ -96,7 +96,8 @@ struct cmd_tbl {
|
|||
struct bios_parser *bp, uint8_t id);
|
||||
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance);
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait);
|
||||
};
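Adding bypass_panel_control_wait means touching every layer: the cmd_tbl function-pointer signature, the bios_parser wrapper, and the DMCUB command payload. A toy sketch of threading a new flag through an ops table; all names here are illustrative, not the real DC interfaces:

```c
#include <stdio.h>
#include <stdint.h>

/* toy command payload with the new bypass flag alongside the old fields */
struct lvtma_cmd {
	uint8_t pwr_on;
	uint8_t panel_inst;
	uint8_t bypass_panel_control_wait;
};

/* ops table: the extra parameter shows up in the function-pointer type */
struct cmd_tbl {
	int (*enable_lvtma_control)(uint8_t pwr_on, uint8_t panel_inst,
				    uint8_t bypass_wait);
};

static int enable_lvtma_control_impl(uint8_t pwr_on, uint8_t panel_inst,
				     uint8_t bypass_wait)
{
	struct lvtma_cmd cmd = {
		.pwr_on = pwr_on,
		.panel_inst = panel_inst,
		.bypass_panel_control_wait = bypass_wait,
	};

	printf("queue cmd: pwr=%u inst=%u bypass=%u\n",
	       cmd.pwr_on, cmd.panel_inst, cmd.bypass_panel_control_wait);
	return 0;
}

int main(void)
{
	struct cmd_tbl tbl = { .enable_lvtma_control = enable_lvtma_control_impl };

	return tbl.enable_lvtma_control(1, 0, 0);
}
```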
|
||||
|
||||
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
|
||||
|
|
|
@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
|||
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
|
||||
param == TABLE_WATERMARKS)
|
||||
DC_LOG_WARNING("Watermarks table not configured properly by SMU");
|
||||
else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
|
||||
msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
|
||||
DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
|
||||
else
|
||||
ASSERT(0);
|
||||
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include "core_types.h"
|
||||
#include "dm_helpers.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "link.h"
|
||||
|
||||
#include "atomfirmware.h"
|
||||
#include "smu13_driver_if.h"
|
||||
|
@ -255,6 +256,94 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s
|
|||
}
|
||||
}
|
||||
|
||||
static void dcn32_update_clocks_update_dentist(
|
||||
struct clk_mgr_internal *clk_mgr,
|
||||
struct dc_state *context,
|
||||
uint32_t old_dispclk_khz)
|
||||
{
|
||||
uint32_t new_disp_divider = 0;
|
||||
uint32_t old_disp_divider = 0;
|
||||
uint32_t new_dispclk_wdivider = 0;
|
||||
uint32_t old_dispclk_wdivider = 0;
|
||||
uint32_t i;
|
||||
|
||||
if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0)
|
||||
return;
|
||||
|
||||
new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
|
||||
* clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
|
||||
old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
|
||||
* clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz;
|
||||
|
||||
new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
|
||||
old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider);
|
||||
|
||||
/* When changing divider to or from 127, some extra programming is required to prevent corruption */
|
||||
if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
|
||||
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
|
||||
uint32_t fifo_level;
|
||||
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
|
||||
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
|
||||
int32_t N;
|
||||
int32_t j;
|
||||
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
/* Virtual encoders don't have this function */
|
||||
if (!stream_enc->funcs->get_fifo_cal_average_level)
|
||||
continue;
|
||||
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
|
||||
stream_enc);
|
||||
N = fifo_level / 4;
|
||||
dccg->funcs->set_fifo_errdet_ovr_en(
|
||||
dccg,
|
||||
true);
|
||||
for (j = 0; j < N - 4; j++)
|
||||
dccg->funcs->otg_drop_pixel(
|
||||
dccg,
|
||||
pipe_ctx->stream_res.tg->inst);
|
||||
dccg->funcs->set_fifo_errdet_ovr_en(
|
||||
dccg,
|
||||
false);
|
||||
}
|
||||
} else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) {
|
||||
/* request clock with 126 divider first */
|
||||
uint32_t temp_disp_divider = dentist_get_divider_from_did(126);
|
||||
uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
|
||||
|
||||
if (clk_mgr->smu_present)
|
||||
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
|
||||
|
||||
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
|
||||
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
|
||||
struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
|
||||
uint32_t fifo_level;
|
||||
int32_t N;
|
||||
int32_t j;
|
||||
|
||||
if (!pipe_ctx->stream)
|
||||
continue;
|
||||
/* Virtual encoders don't have this function */
|
||||
if (!stream_enc->funcs->get_fifo_cal_average_level)
|
||||
continue;
|
||||
fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
|
||||
stream_enc);
|
||||
N = fifo_level / 4;
|
||||
dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
|
||||
for (j = 0; j < 12 - N; j++)
|
||||
dccg->funcs->otg_add_pixel(dccg,
|
||||
pipe_ctx->stream_res.tg->inst);
|
||||
dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
|
||||
}
|
||||
}
|
||||
|
||||
/* do requested DISPCLK updates*/
|
||||
if (clk_mgr->smu_present)
|
||||
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
|
||||
}
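The new helper derives the DENTIST divider from the VCO frequency and the requested DISPCLK, and special-cases a divider ID of 127 by nudging the OTG FIFO before and after the switch. A hedged sketch of just the divider arithmetic, with a placeholder scale factor (the real constant lives in the clk_mgr code and may differ):

```c
#include <stdio.h>
#include <stdint.h>

/* placeholder value; the real DENTIST_DIVIDER_RANGE_SCALE_FACTOR may differ */
#define DIVIDER_RANGE_SCALE_FACTOR 4

static uint32_t dentist_divider(uint32_t vco_freq_khz, uint32_t dispclk_khz)
{
	if (dispclk_khz == 0)
		return 0;
	return DIVIDER_RANGE_SCALE_FACTOR * vco_freq_khz / dispclk_khz;
}

int main(void)
{
	uint32_t vco_khz = 3600000;  /* example VCO frequency */
	uint32_t old_khz = 400000, new_khz = 1200000;

	uint32_t old_div = dentist_divider(vco_khz, old_khz);
	uint32_t new_div = dentist_divider(vco_khz, new_khz);

	printf("old divider %u -> new divider %u\n", old_div, new_div);
	/* the kernel code additionally maps dividers to DID codes and, when a
	 * divider of 127 is involved, adjusts the OTG FIFO with
	 * otg_add_pixel()/otg_drop_pixel() around the transition */
	return 0;
}
```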
|
||||
|
||||
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
struct dc_state *context,
|
||||
bool safe_to_lower)
|
||||
|
@ -273,6 +362,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
bool p_state_change_support;
|
||||
bool fclk_p_state_change_support;
|
||||
int total_plane_count;
|
||||
int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz;
|
||||
|
||||
if (dc->work_arounds.skip_clock_update)
|
||||
return;
|
||||
|
@ -396,9 +486,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
|
||||
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
|
||||
|
||||
if (clk_mgr->smu_present)
|
||||
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
|
||||
|
||||
update_dispclk = true;
|
||||
}
|
||||
|
||||
|
@ -418,13 +505,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
if (dpp_clock_lowered) {
|
||||
/* if clock is being lowered, increase DTO before lowering refclk */
|
||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
|
||||
if (clk_mgr->smu_present)
|
||||
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
|
||||
} else {
|
||||
/* if clock is being raised, increase refclk before lowering DTO */
|
||||
if (update_dppclk || update_dispclk)
|
||||
dcn20_update_clocks_update_dentist(clk_mgr, context);
|
||||
dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
|
||||
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
|
||||
* that we do not lower dto when it is not safe to lower. We do not need to
|
||||
* compare the current and new dppclk before calling this function.
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
|
||||
#include "resource.h"
|
||||
|
||||
#include "gpio_service_interface.h"
|
||||
#include "clk_mgr.h"
|
||||
#include "clock_source.h"
|
||||
#include "dc_bios_types.h"
|
||||
|
@ -53,7 +54,7 @@
|
|||
#include "link_enc_cfg.h"
|
||||
|
||||
#include "dc_link.h"
|
||||
#include "dc_link_ddc.h"
|
||||
#include "link.h"
|
||||
#include "dm_helpers.h"
|
||||
#include "mem_input.h"
|
||||
|
||||
|
@ -68,8 +69,6 @@
|
|||
|
||||
#include "dmub/dmub_srv.h"
|
||||
|
||||
#include "i2caux_interface.h"
|
||||
|
||||
#include "dce/dmub_psr.h"
|
||||
|
||||
#include "dce/dmub_hw_lock_mgr.h"
|
||||
|
@ -871,6 +870,7 @@ static bool dc_construct_ctx(struct dc *dc,
|
|||
|
||||
dc_ctx->perf_trace = dc_perf_trace_create();
|
||||
if (!dc_ctx->perf_trace) {
|
||||
kfree(dc_ctx);
|
||||
ASSERT_CRITICAL(false);
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
|
|||
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
|
||||
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
|
||||
{ COLOR_SPACE_YCBCR2020_TYPE,
|
||||
{ 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
|
||||
0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
|
||||
{ 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
|
||||
0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
|
||||
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
|
||||
{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
|
||||
0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
|
||||
|
|
|
@ -33,9 +33,10 @@
|
|||
#include "gpio_service_interface.h"
|
||||
#include "core_status.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "dc_link_dpia.h"
|
||||
#include "dc_link_ddc.h"
|
||||
#include "link/link_dp_dpia.h"
|
||||
#include "link/link_ddc.h"
|
||||
#include "link_hwss.h"
|
||||
#include "link.h"
|
||||
#include "opp.h"
|
||||
|
||||
#include "link_encoder.h"
|
||||
|
@ -50,8 +51,12 @@
|
|||
#include "dmub/dmub_srv.h"
|
||||
#include "inc/hw/panel_cntl.h"
|
||||
#include "inc/link_enc_cfg.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
#include "link/link_dpcd.h"
|
||||
#include "link/link_dp_trace.h"
|
||||
#include "link/link_hpd.h"
|
||||
#include "link/link_dp_training.h"
|
||||
#include "link/link_dp_phy.h"
|
||||
#include "link/link_dp_capability.h"
|
||||
|
||||
#include "dc/dcn30/dcn30_vpg.h"
|
||||
|
||||
|
@ -78,7 +83,7 @@ static void dc_link_destruct(struct dc_link *link)
|
|||
}
|
||||
|
||||
if (link->ddc)
|
||||
dal_ddc_service_destroy(&link->ddc);
|
||||
link_destroy_ddc_service(&link->ddc);
|
||||
|
||||
if (link->panel_cntl)
|
||||
link->panel_cntl->funcs->destroy(&link->panel_cntl);
|
||||
|
@ -102,108 +107,6 @@ static void dc_link_destruct(struct dc_link *link)
|
|||
dc_sink_release(link->remote_sinks[i]);
|
||||
}
|
||||
|
||||
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
|
||||
struct graphics_object_id link_id,
|
||||
struct gpio_service *gpio_service)
|
||||
{
|
||||
enum bp_result bp_result;
|
||||
struct graphics_object_hpd_info hpd_info;
|
||||
struct gpio_pin_info pin_info;
|
||||
|
||||
if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
|
||||
return NULL;
|
||||
|
||||
bp_result = dcb->funcs->get_gpio_pin_info(dcb,
|
||||
hpd_info.hpd_int_gpio_uid, &pin_info);
|
||||
|
||||
if (bp_result != BP_RESULT_OK) {
|
||||
ASSERT(bp_result == BP_RESULT_NORECORD);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return dal_gpio_service_create_irq(gpio_service,
|
||||
pin_info.offset,
|
||||
pin_info.mask);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function: program_hpd_filter
|
||||
*
|
||||
* @brief
|
||||
* Programs HPD filter on associated HPD line
|
||||
*
|
||||
* @param [in] delay_on_connect_in_ms: Connect filter timeout
|
||||
* @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout
|
||||
*
|
||||
* @return
|
||||
* true on success, false otherwise
|
||||
*/
|
||||
static bool program_hpd_filter(const struct dc_link *link)
|
||||
{
|
||||
bool result = false;
|
||||
struct gpio *hpd;
|
||||
int delay_on_connect_in_ms = 0;
|
||||
int delay_on_disconnect_in_ms = 0;
|
||||
|
||||
if (link->is_hpd_filter_disabled)
|
||||
return false;
|
||||
/* Verify feature is supported */
|
||||
switch (link->connector_signal) {
|
||||
case SIGNAL_TYPE_DVI_SINGLE_LINK:
|
||||
case SIGNAL_TYPE_DVI_DUAL_LINK:
|
||||
case SIGNAL_TYPE_HDMI_TYPE_A:
|
||||
/* Program hpd filter */
|
||||
delay_on_connect_in_ms = 500;
|
||||
delay_on_disconnect_in_ms = 100;
|
||||
break;
|
||||
case SIGNAL_TYPE_DISPLAY_PORT:
|
||||
case SIGNAL_TYPE_DISPLAY_PORT_MST:
|
||||
/* Program hpd filter to allow DP signal to settle */
|
||||
/* 500: not able to detect MST <-> SST switch as HPD is low for
|
||||
* only 100ms on DELL U2413
|
||||
* 0: some passive dongle still show aux mode instead of i2c
|
||||
* 20-50: not enough to hide bouncing HPD with passive dongle.
|
||||
* also see intermittent i2c read issues.
|
||||
*/
|
||||
delay_on_connect_in_ms = 80;
|
||||
delay_on_disconnect_in_ms = 0;
|
||||
break;
|
||||
case SIGNAL_TYPE_LVDS:
|
||||
case SIGNAL_TYPE_EDP:
|
||||
default:
|
||||
/* Don't program hpd filter */
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Obtain HPD handle */
|
||||
hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
|
||||
link->ctx->gpio_service);
|
||||
|
||||
if (!hpd)
|
||||
return result;
|
||||
|
||||
/* Setup HPD filtering */
|
||||
if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
|
||||
struct gpio_hpd_config config;
|
||||
|
||||
config.delay_on_connect = delay_on_connect_in_ms;
|
||||
config.delay_on_disconnect = delay_on_disconnect_in_ms;
|
||||
|
||||
dal_irq_setup_hpd_filter(hpd, &config);
|
||||
|
||||
dal_gpio_close(hpd);
|
||||
|
||||
result = true;
|
||||
} else {
|
||||
ASSERT_CRITICAL(false);
|
||||
}
|
||||
|
||||
/* Release HPD handle */
|
||||
dal_gpio_destroy_irq(&hpd);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool dc_link_wait_for_t12(struct dc_link *link)
|
||||
{
|
||||
if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
|
||||
|
@ -226,7 +129,6 @@ bool dc_link_wait_for_t12(struct dc_link *link)
|
|||
bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
|
||||
{
|
||||
uint32_t is_hpd_high = 0;
|
||||
struct gpio *hpd_pin;
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_LVDS) {
|
||||
*type = dc_connection_single;
|
||||
|
@ -250,17 +152,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
|
|||
return true;
|
||||
}
|
||||
|
||||
/* todo: may need to lock gpio access */
|
||||
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
|
||||
link->ctx->gpio_service);
|
||||
if (!hpd_pin)
|
||||
if (!query_hpd_status(link, &is_hpd_high))
|
||||
goto hpd_gpio_failure;
|
||||
|
||||
dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
|
||||
dal_gpio_get_value(hpd_pin, &is_hpd_high);
|
||||
dal_gpio_close(hpd_pin);
|
||||
dal_gpio_destroy_irq(&hpd_pin);
|
||||
|
||||
if (is_hpd_high) {
|
||||
*type = dc_connection_single;
|
||||
/* TODO: need to do the actual detection */
|
||||
|
@ -386,7 +280,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
|
|||
(connector_id == CONNECTOR_ID_EDP) ||
|
||||
(connector_id == CONNECTOR_ID_USBC));
|
||||
|
||||
ddc = dal_ddc_service_get_ddc_pin(link->ddc);
|
||||
ddc = get_ddc_pin(link->ddc);
|
||||
|
||||
if (!ddc) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
|
@ -531,11 +425,179 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_do
|
|||
return signal;
|
||||
}
|
||||
|
||||
static bool i2c_read(
|
||||
struct ddc_service *ddc,
|
||||
uint32_t address,
|
||||
uint8_t *buffer,
|
||||
uint32_t len)
|
||||
{
|
||||
uint8_t offs_data = 0;
|
||||
struct i2c_payload payloads[2] = {
|
||||
{
|
||||
.write = true,
|
||||
.address = address,
|
||||
.length = 1,
|
||||
.data = &offs_data },
|
||||
{
|
||||
.write = false,
|
||||
.address = address,
|
||||
.length = len,
|
||||
.data = buffer } };
|
||||
|
||||
struct i2c_command command = {
|
||||
.payloads = payloads,
|
||||
.number_of_payloads = 2,
|
||||
.engine = DDC_I2C_COMMAND_ENGINE,
|
||||
.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
|
||||
|
||||
return dm_helpers_submit_i2c(
|
||||
ddc->ctx,
|
||||
ddc->link,
|
||||
&command);
|
||||
}
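i2c_read() above builds the classic two-payload transaction: a one-byte offset write followed by a read from the same address. A standalone sketch of that payload pairing with simplified structs and a pretend submit function:

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* simplified payload/command shapes mirroring the write-then-read pair */
struct i2c_payload {
	bool write;
	uint32_t address;
	uint32_t length;
	uint8_t *data;
};

struct i2c_command {
	struct i2c_payload *payloads;
	uint32_t number_of_payloads;
};

/* pretend bus: just report what would go on the wire */
static bool submit(const struct i2c_command *cmd)
{
	for (uint32_t i = 0; i < cmd->number_of_payloads; i++)
		printf("payload %u: %s addr=0x%02x len=%u\n", (unsigned int)i,
		       cmd->payloads[i].write ? "write" : "read",
		       (unsigned int)cmd->payloads[i].address,
		       (unsigned int)cmd->payloads[i].length);
	return true;
}

int main(void)
{
	uint8_t offset = 0x00, buf[16] = { 0 };
	struct i2c_payload payloads[2] = {
		{ .write = true,  .address = 0x40, .length = 1,  .data = &offset },
		{ .write = false, .address = 0x40, .length = 16, .data = buf },
	};
	struct i2c_command cmd = { .payloads = payloads, .number_of_payloads = 2 };

	return submit(&cmd) ? 0 : 1;
}
```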
|
||||
|
||||
enum {
|
||||
DP_SINK_CAP_SIZE =
|
||||
DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
|
||||
};
|
||||
|
||||
static void query_dp_dual_mode_adaptor(
|
||||
struct ddc_service *ddc,
|
||||
struct display_sink_capability *sink_cap)
|
||||
{
|
||||
uint8_t i;
|
||||
bool is_valid_hdmi_signature;
|
||||
enum display_dongle_type *dongle = &sink_cap->dongle_type;
|
||||
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
|
||||
bool is_type2_dongle = false;
|
||||
int retry_count = 2;
|
||||
struct dp_hdmi_dongle_signature_data *dongle_signature;
|
||||
|
||||
/* Assume we have no valid DP passive dongle connected */
|
||||
*dongle = DISPLAY_DONGLE_NONE;
|
||||
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
|
||||
|
||||
/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
|
||||
if (!i2c_read(
|
||||
ddc,
|
||||
DP_HDMI_DONGLE_ADDRESS,
|
||||
type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf))) {
|
||||
/* Passive HDMI dongles can sometimes fail here without retrying*/
|
||||
while (retry_count > 0) {
|
||||
if (i2c_read(ddc,
|
||||
DP_HDMI_DONGLE_ADDRESS,
|
||||
type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf)))
|
||||
break;
|
||||
retry_count--;
|
||||
}
|
||||
if (retry_count == 0) {
|
||||
*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
|
||||
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
|
||||
"DP-DVI passive dongle %dMhz: ",
|
||||
DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check if Type 2 dongle.*/
|
||||
if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
|
||||
is_type2_dongle = true;
|
||||
|
||||
dongle_signature =
|
||||
(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
|
||||
|
||||
is_valid_hdmi_signature = true;
|
||||
|
||||
/* Check EOT */
|
||||
if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
|
||||
is_valid_hdmi_signature = false;
|
||||
}
|
||||
|
||||
/* Check signature */
|
||||
for (i = 0; i < sizeof(dongle_signature->id); ++i) {
|
||||
/* If its not the right signature,
|
||||
* skip mismatch in subversion byte.*/
|
||||
if (dongle_signature->id[i] !=
|
||||
dp_hdmi_dongle_signature_str[i] && i != 3) {
|
||||
|
||||
if (is_type2_dongle) {
|
||||
is_valid_hdmi_signature = false;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if (is_type2_dongle) {
|
||||
uint32_t max_tmds_clk =
|
||||
type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
|
||||
|
||||
max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
|
||||
|
||||
if (0 == max_tmds_clk ||
|
||||
max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
|
||||
max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
|
||||
*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf),
|
||||
"DP-DVI passive dongle %dMhz: ",
|
||||
DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
|
||||
} else {
|
||||
if (is_valid_hdmi_signature == true) {
|
||||
*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf),
|
||||
"Type 2 DP-HDMI passive dongle %dMhz: ",
|
||||
max_tmds_clk);
|
||||
} else {
|
||||
*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf),
|
||||
"Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
|
||||
max_tmds_clk);
|
||||
|
||||
}
|
||||
|
||||
/* Multiply by 1000 to convert to kHz. */
|
||||
sink_cap->max_hdmi_pixel_clock =
|
||||
max_tmds_clk * 1000;
|
||||
}
|
||||
sink_cap->is_dongle_type_one = false;
|
||||
|
||||
} else {
|
||||
if (is_valid_hdmi_signature == true) {
|
||||
*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf),
|
||||
"Type 1 DP-HDMI passive dongle %dMhz: ",
|
||||
sink_cap->max_hdmi_pixel_clock / 1000);
|
||||
} else {
|
||||
*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
|
||||
|
||||
CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
|
||||
sizeof(type2_dongle_buf),
|
||||
"Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
|
||||
sink_cap->max_hdmi_pixel_clock / 1000);
|
||||
}
|
||||
sink_cap->is_dongle_type_one = true;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
|
||||
struct display_sink_capability *sink_cap,
|
||||
struct audio_support *audio_support)
|
||||
{
|
||||
dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
|
||||
query_dp_dual_mode_adaptor(ddc, sink_cap);
|
||||
|
||||
return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
|
||||
audio_support);
|
||||
|
@ -775,7 +837,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
|
|||
return true;
|
||||
|
||||
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
|
||||
DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
|
||||
DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
|
||||
|
||||
if (is_in_alt_mode)
|
||||
return true;
|
||||
|
@ -971,7 +1033,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
|
|||
dc_is_embedded_signal(link->local_sink->sink_signal) ||
|
||||
link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
|
||||
destrictive = false;
|
||||
} else if (dp_get_link_encoding_format(&max_link_cap) ==
|
||||
} else if (link_dp_get_encoding_format(&max_link_cap) ==
|
||||
DP_8b_10b_ENCODING) {
|
||||
if (link->dpcd_caps.is_mst_capable ||
|
||||
is_link_enc_unavailable) {
|
||||
|
@@ -1155,11 +1217,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
		else
			link->dpcd_sink_count = 1;

-		dal_ddc_service_set_transaction_type(link->ddc,
+		set_ddc_transaction_type(link->ddc,
						sink_caps.transaction_type);

		link->aux_mode =
-			dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
+			link_is_in_aux_transaction_mode(link->ddc);

		sink_init_data.link = link;
		sink_init_data.sink_signal = sink_caps.signal;
@ -1367,58 +1429,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
|
|||
return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
|
||||
}
|
||||
|
||||
bool dc_link_get_hpd_state(struct dc_link *dc_link)
|
||||
{
|
||||
uint32_t state;
|
||||
|
||||
dal_gpio_lock_pin(dc_link->hpd_gpio);
|
||||
dal_gpio_get_value(dc_link->hpd_gpio, &state);
|
||||
dal_gpio_unlock_pin(dc_link->hpd_gpio);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
static enum hpd_source_id get_hpd_line(struct dc_link *link)
|
||||
{
|
||||
struct gpio *hpd;
|
||||
enum hpd_source_id hpd_id;
|
||||
|
||||
hpd_id = HPD_SOURCEID_UNKNOWN;
|
||||
|
||||
hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
|
||||
link->ctx->gpio_service);
|
||||
|
||||
if (hpd) {
|
||||
switch (dal_irq_get_source(hpd)) {
|
||||
case DC_IRQ_SOURCE_HPD1:
|
||||
hpd_id = HPD_SOURCEID1;
|
||||
break;
|
||||
case DC_IRQ_SOURCE_HPD2:
|
||||
hpd_id = HPD_SOURCEID2;
|
||||
break;
|
||||
case DC_IRQ_SOURCE_HPD3:
|
||||
hpd_id = HPD_SOURCEID3;
|
||||
break;
|
||||
case DC_IRQ_SOURCE_HPD4:
|
||||
hpd_id = HPD_SOURCEID4;
|
||||
break;
|
||||
case DC_IRQ_SOURCE_HPD5:
|
||||
hpd_id = HPD_SOURCEID5;
|
||||
break;
|
||||
case DC_IRQ_SOURCE_HPD6:
|
||||
hpd_id = HPD_SOURCEID6;
|
||||
break;
|
||||
default:
|
||||
BREAK_TO_DEBUGGER();
|
||||
break;
|
||||
}
|
||||
|
||||
dal_gpio_destroy_irq(&hpd);
|
||||
}
|
||||
|
||||
return hpd_id;
|
||||
}
|
||||
|
||||
static enum channel_id get_ddc_line(struct dc_link *link)
|
||||
{
|
||||
struct ddc *ddc;
|
||||
|
@ -1426,7 +1436,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
|
|||
|
||||
channel = CHANNEL_ID_UNKNOWN;
|
||||
|
||||
ddc = dal_ddc_service_get_ddc_pin(link->ddc);
|
||||
ddc = get_ddc_pin(link->ddc);
|
||||
|
||||
if (ddc) {
|
||||
switch (dal_ddc_get_line(ddc)) {
|
||||
|
@ -1583,7 +1593,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
|
|||
if (link->dc->res_pool->funcs->link_init)
|
||||
link->dc->res_pool->funcs->link_init(link);
|
||||
|
||||
link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
|
||||
link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
|
||||
link->ctx->gpio_service);
|
||||
|
||||
if (link->hpd_gpio) {
|
||||
|
@ -1663,7 +1673,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
|
|||
ddc_service_init_data.ctx = link->ctx;
|
||||
ddc_service_init_data.id = link->link_id;
|
||||
ddc_service_init_data.link = link;
|
||||
link->ddc = dal_ddc_service_create(&ddc_service_init_data);
|
||||
link->ddc = link_create_ddc_service(&ddc_service_init_data);
|
||||
|
||||
if (!link->ddc) {
|
||||
DC_ERROR("Failed to create ddc_service!\n");
|
||||
|
@ -1676,7 +1686,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
|
|||
}
|
||||
|
||||
link->ddc_hw_inst =
|
||||
dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
|
||||
dal_ddc_get_line(get_ddc_pin(link->ddc));
|
||||
|
||||
|
||||
if (link->dc->res_pool->funcs->panel_cntl_create &&
|
||||
|
@ -1813,7 +1823,7 @@ link_enc_create_fail:
|
|||
if (link->panel_cntl != NULL)
|
||||
link->panel_cntl->funcs->destroy(&link->panel_cntl);
|
||||
panel_cntl_create_fail:
|
||||
dal_ddc_service_destroy(&link->ddc);
|
||||
link_destroy_ddc_service(&link->ddc);
|
||||
ddc_create_fail:
|
||||
create_fail:
|
||||
|
||||
|
@ -1871,7 +1881,7 @@ static bool dc_link_construct_dpia(struct dc_link *link,
|
|||
/* Set indicator for dpia link so that ddc won't be created */
|
||||
ddc_service_init_data.is_dpia_link = true;
|
||||
|
||||
link->ddc = dal_ddc_service_create(&ddc_service_init_data);
|
||||
link->ddc = link_create_ddc_service(&ddc_service_init_data);
|
||||
if (!link->ddc) {
|
||||
DC_ERROR("Failed to create ddc_service!\n");
|
||||
goto ddc_create_fail;
|
||||
|
@ -1996,7 +2006,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
|||
* Temporary w/a to get DP2.0 link rates to work with SST.
|
||||
* TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
|
||||
*/
|
||||
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
|
||||
if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
|
||||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
|
||||
link->dc->debug.set_mst_en_for_sst) {
|
||||
dp_enable_mst_on_sink(link, true);
|
||||
|
@ -2009,7 +2019,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
|||
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
|
||||
}
|
||||
|
||||
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
|
||||
/* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
|
||||
} else {
|
||||
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
|
||||
|
@ -2050,7 +2060,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
|||
else
|
||||
fec_enable = true;
|
||||
|
||||
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
|
||||
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
|
||||
dp_set_fec_enable(link, fec_enable);
|
||||
|
||||
// during mode set we do DP_SET_POWER off then on, aux writes are lost
|
||||
|
@ -2166,7 +2176,7 @@ void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
|
|||
}
|
||||
|
||||
if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
|
||||
dp_receiver_power_ctrl(link, false);
|
||||
dc_link_dp_receiver_power_ctrl(link, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2339,7 +2349,7 @@ static void write_i2c_retimer_setting(
|
|||
value = settings->reg_settings[i].i2c_reg_val;
|
||||
else {
|
||||
i2c_success =
|
||||
dal_ddc_service_query_ddc_data(
|
||||
link_query_ddc_data(
|
||||
pipe_ctx->stream->link->ddc,
|
||||
slave_address, &offset, 1, &value, 1);
|
||||
if (!i2c_success)
|
||||
|
@ -2389,7 +2399,7 @@ static void write_i2c_retimer_setting(
|
|||
value = settings->reg_settings_6g[i].i2c_reg_val;
|
||||
else {
|
||||
i2c_success =
|
||||
dal_ddc_service_query_ddc_data(
|
||||
link_query_ddc_data(
|
||||
pipe_ctx->stream->link->ddc,
|
||||
slave_address, &offset, 1, &value, 1);
|
||||
if (!i2c_success)
|
||||
|
@ -2631,7 +2641,7 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
|
|||
|
||||
if (dc_is_dp_sst_signal(signal) ||
|
||||
link->mst_stream_alloc_table.stream_count == 0) {
|
||||
if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
|
||||
dp_set_fec_enable(link, false);
|
||||
dp_set_fec_ready(link, link_res, false);
|
||||
}
|
||||
|
@@ -2687,7 +2697,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
	}

	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-		dal_ddc_service_write_scdc_data(
+		write_scdc_data(
			stream->link->ddc,
			stream->phy_pix_clk,
			stream->timing.flags.LTE_340MCSC_SCRAMBLE);

@@ -2708,7 +2718,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
			stream->phy_pix_clk);

	if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-		dal_ddc_service_read_scdc_data(link->ddc);
+		read_scdc_data(link->ddc);
}

static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
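The SCDC write above passes the stream's pixel clock and the sink's LTE_340MCSC_SCRAMBLE capability so the helper can decide whether TMDS scrambling needs to be configured. A rough standalone sketch of that decision, assuming the HDMI 2.0 rule that scrambling is mandatory above 340 Mcsc and otherwise only used when the sink requests it; the helper name and the threshold constant are illustrative, not the driver's.

	#include <stdbool.h>
	#include <stdint.h>

	#define HDMI_SCRAMBLING_THRESHOLD_KHZ 340000 /* 340 Mcsc, assumed */

	/* Illustrative only: mirrors the two inputs write_scdc_data() receives. */
	static bool hdmi_needs_scrambling(uint32_t pix_clk_khz, bool lte_340mcsc_scramble)
	{
		if (pix_clk_khz > HDMI_SCRAMBLING_THRESHOLD_KHZ)
			return true;             /* above 340 Mcsc: scrambling is mandatory */
		return lte_340mcsc_scramble;     /* at or below: only if the sink asks for it */
	}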
@ -3679,7 +3689,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
|
|||
}
|
||||
|
||||
/* slot X.Y for SST payload allocate */
|
||||
if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) ==
|
||||
if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) ==
|
||||
DP_128b_132b_ENCODING) {
|
||||
avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
|
||||
|
||||
|
@ -3762,7 +3772,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
|
|||
|
||||
/* program DP source TX for payload */
|
||||
if (link_hwss->ext.update_stream_allocation_table == NULL ||
|
||||
dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
DC_LOG_ERROR("Failure: unknown encoding format\n");
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
@ -3878,7 +3888,7 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
|
|||
|
||||
/* update mst stream allocation table hardware state */
|
||||
if (link_hwss->ext.update_stream_allocation_table == NULL ||
|
||||
dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
DC_LOG_ERROR("Failure: unknown encoding format\n");
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
@ -3945,7 +3955,7 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t
|
|||
|
||||
/* update mst stream allocation table hardware state */
|
||||
if (link_hwss->ext.update_stream_allocation_table == NULL ||
|
||||
dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
DC_LOG_ERROR("Failure: unknown encoding format\n");
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
@ -4058,7 +4068,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
|
|||
|
||||
/* update mst stream allocation table hardware state */
|
||||
if (link_hwss->ext.update_stream_allocation_table == NULL ||
|
||||
dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
|
||||
DC_LOG_DEBUG("Unknown encoding format\n");
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
@ -4106,7 +4116,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
|
|||
|
||||
/* stream encoder index */
|
||||
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
config.stream_enc_idx =
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
|
||||
|
||||
|
@ -4115,7 +4125,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
|
|||
|
||||
/* link encoder index */
|
||||
config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
|
||||
|
||||
/* dio output index is dpia index for DPIA endpoint & dcio index by default */
|
||||
|
@ -4136,7 +4146,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
|
|||
config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
|
||||
config.mst_enabled = (pipe_ctx->stream->signal ==
|
||||
SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
|
||||
config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
|
||||
config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
|
||||
config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
|
||||
1 : 0;
|
||||
config.dpms_off = dpms_off;
|
||||
|
@ -4239,7 +4249,7 @@ void core_link_enable_stream(
|
|||
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
|
||||
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
|
||||
|
||||
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
|
||||
|
@ -4261,7 +4271,7 @@ void core_link_enable_stream(
|
|||
ASSERT(link_enc);
|
||||
|
||||
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
|
||||
&& !is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
&& !link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_enc)
|
||||
link_enc->funcs->setup(
|
||||
link_enc,
|
||||
|
@ -4271,7 +4281,7 @@ void core_link_enable_stream(
|
|||
pipe_ctx->stream->link->link_state_valid = true;
|
||||
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
otg_out_dest = OUT_MUX_HPO_DP;
|
||||
else
|
||||
otg_out_dest = OUT_MUX_DIO;
|
||||
|
@ -4373,7 +4383,7 @@ void core_link_enable_stream(
|
|||
* from transmitter control.
|
||||
*/
|
||||
if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
|
||||
is_dp_128b_132b_signal(pipe_ctx)))
|
||||
link_is_dp_128b_132b_signal(pipe_ctx)))
|
||||
if (link_enc)
|
||||
link_enc->funcs->setup(
|
||||
link_enc,
|
||||
|
@ -4393,7 +4403,7 @@ void core_link_enable_stream(
|
|||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
|
||||
dc_link_allocate_mst_payload(pipe_ctx);
|
||||
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
|
||||
is_dp_128b_132b_signal(pipe_ctx))
|
||||
link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
dc_link_update_sst_payload(pipe_ctx, true);
|
||||
|
||||
dc->hwss.unblank_stream(pipe_ctx,
|
||||
|
@ -4411,7 +4421,7 @@ void core_link_enable_stream(
|
|||
dc->hwss.enable_audio_stream(pipe_ctx);
|
||||
|
||||
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
|
||||
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
|
||||
dc_is_virtual_signal(pipe_ctx->stream->signal))
|
||||
|
@ -4430,7 +4440,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
struct dc_link *link = stream->sink->link;
|
||||
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx))
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
|
||||
|
||||
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
|
||||
|
@ -4463,7 +4473,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
|
||||
deallocate_mst_payload(pipe_ctx);
|
||||
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
|
||||
is_dp_128b_132b_signal(pipe_ctx))
|
||||
link_is_dp_128b_132b_signal(pipe_ctx))
|
||||
dc_link_update_sst_payload(pipe_ctx, false);
|
||||
|
||||
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
|
||||
|
@ -4473,7 +4483,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
unsigned short masked_chip_caps = link->chip_caps &
|
||||
EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
|
||||
//Need to inform that sink is going to use legacy HDMI mode.
|
||||
dal_ddc_service_write_scdc_data(
|
||||
write_scdc_data(
|
||||
link->ddc,
|
||||
165000,//vbios only handles 165Mhz.
|
||||
false);
|
||||
|
@ -4492,7 +4502,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
}
|
||||
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
|
||||
!is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
!link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
|
||||
/* In DP1.x SST mode, our encoder will go to TPS1
|
||||
* when link is on but stream is off.
|
||||
|
@ -4512,7 +4522,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
if (dc_is_dp_signal(pipe_ctx->stream->signal))
|
||||
dp_set_dsc_enable(pipe_ctx, false);
|
||||
}
|
||||
if (is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
|
||||
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
|
||||
}
|
||||
|
@ -4531,51 +4541,6 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
|
|||
dc->hwss.set_avmute(pipe_ctx, enable);
|
||||
}
|
||||
|
||||
/**
|
||||
* dc_link_enable_hpd_filter:
|
||||
* If enable is true, programs HPD filter on associated HPD line using
|
||||
* delay_on_disconnect/delay_on_connect values dependent on
|
||||
* link->connector_signal
|
||||
*
|
||||
* If enable is false, programs HPD filter on associated HPD line with no
|
||||
* delays on connect or disconnect
|
||||
*
|
||||
* @link: pointer to the dc link
|
||||
* @enable: boolean specifying whether to enable hbd
|
||||
*/
|
||||
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
|
||||
{
|
||||
struct gpio *hpd;
|
||||
|
||||
if (enable) {
|
||||
link->is_hpd_filter_disabled = false;
|
||||
program_hpd_filter(link);
|
||||
} else {
|
||||
link->is_hpd_filter_disabled = true;
|
||||
/* Obtain HPD handle */
|
||||
hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
|
||||
|
||||
if (!hpd)
|
||||
return;
|
||||
|
||||
/* Setup HPD filtering */
|
||||
if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
|
||||
struct gpio_hpd_config config;
|
||||
|
||||
config.delay_on_connect = 0;
|
||||
config.delay_on_disconnect = 0;
|
||||
|
||||
dal_irq_setup_hpd_filter(hpd, &config);
|
||||
|
||||
dal_gpio_close(hpd);
|
||||
} else {
|
||||
ASSERT_CRITICAL(false);
|
||||
}
|
||||
/* Release HPD handle */
|
||||
dal_gpio_destroy_irq(&hpd);
|
||||
}
|
||||
}
|
||||
|
||||
void dc_link_set_drive_settings(struct dc *dc,
|
||||
struct link_training_settings *lt_settings,
|
||||
const struct dc_link *link)
|
||||
|
@ -4632,7 +4597,7 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
|
|||
if (link_stream->dpms_off)
|
||||
return;
|
||||
|
||||
if (decide_link_settings(link_stream, &store_settings))
|
||||
if (link_decide_link_settings(link_stream, &store_settings))
|
||||
dp_retrain_link_dp_test(link, &store_settings, false);
|
||||
}
|
||||
|
||||
|
@ -4663,16 +4628,6 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
|
|||
dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
|
||||
}
|
||||
|
||||
void dc_link_enable_hpd(const struct dc_link *link)
|
||||
{
|
||||
dc_link_dp_enable_hpd(link);
|
||||
}
|
||||
|
||||
void dc_link_disable_hpd(const struct dc_link *link)
|
||||
{
|
||||
dc_link_dp_disable_hpd(link);
|
||||
}
|
||||
|
||||
void dc_link_set_test_pattern(struct dc_link *link,
|
||||
enum dp_test_pattern test_pattern,
|
||||
enum dp_test_pattern_color_space test_pattern_color_space,
|
||||
|
@@ -4697,7 +4652,7 @@ uint32_t dc_link_bandwidth_kbps(
	uint32_t total_data_bw_efficiency_x10000 = 0;
	uint32_t link_rate_per_lane_kbps = 0;

-	switch (dp_get_link_encoding_format(link_setting)) {
+	switch (link_dp_get_encoding_format(link_setting)) {
	case DP_8b_10b_ENCODING:
		/* For 8b/10b encoding:
		 * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.

@@ -4726,57 +4681,6 @@ uint32_t dc_link_bandwidth_kbps(
	return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
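The return statement above folds the lane count and a x10000 efficiency factor into the per-lane rate. As a sanity check only, a hedged worked example: the 5.4 Gbps-per-lane figure for HBR2 and the 80% 8b/10b efficiency (FEC ignored) are assumptions based on the DisplayPort spec, not values quoted in this hunk.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Assumed example inputs: HBR2, 4 lanes, plain 8b/10b, no FEC. */
		uint32_t link_rate_per_lane_kbps = 5400000;       /* 5.4 Gbps per lane */
		uint32_t lane_count = 4;
		uint32_t total_data_bw_efficiency_x10000 = 8000;  /* 80% for 8b/10b */

		uint32_t bw_kbps = link_rate_per_lane_kbps * lane_count / 10000 *
				total_data_bw_efficiency_x10000;

		printf("link bandwidth: %u kbps\n", bw_kbps);     /* 17280000 kbps = 17.28 Gbps */
		return 0;
	}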
const struct dc_link_settings *dc_link_get_link_cap(
|
||||
const struct dc_link *link)
|
||||
{
|
||||
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
|
||||
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
|
||||
return &link->preferred_link_setting;
|
||||
return &link->verified_link_cap;
|
||||
}
|
||||
|
||||
void dc_link_overwrite_extended_receiver_cap(
|
||||
struct dc_link *link)
|
||||
{
|
||||
dp_overwrite_extended_receiver_cap(link);
|
||||
}
|
||||
|
||||
bool dc_link_is_fec_supported(const struct dc_link *link)
|
||||
{
|
||||
/* TODO - use asic cap instead of link_enc->features
|
||||
* we no longer know which link enc to use for this link before commit
|
||||
*/
|
||||
struct link_encoder *link_enc = NULL;
|
||||
|
||||
link_enc = link_enc_cfg_get_link_enc(link);
|
||||
ASSERT(link_enc);
|
||||
|
||||
return (dc_is_dp_signal(link->connector_signal) && link_enc &&
|
||||
link_enc->features.fec_supported &&
|
||||
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
|
||||
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
|
||||
}
|
||||
|
||||
bool dc_link_should_enable_fec(const struct dc_link *link)
|
||||
{
|
||||
bool force_disable = false;
|
||||
|
||||
if (link->fec_state == dc_link_fec_enabled)
|
||||
force_disable = false;
|
||||
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
|
||||
link->local_sink &&
|
||||
link->local_sink->edid_caps.panel_patch.disable_fec)
|
||||
force_disable = true;
|
||||
else if (link->connector_signal == SIGNAL_TYPE_EDP
|
||||
&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
|
||||
dsc_support.DSC_SUPPORT == false
|
||||
|| link->panel_config.dsc.disable_dsc_edp
|
||||
|| !link->dc->caps.edp_dsc_support))
|
||||
force_disable = true;
|
||||
|
||||
return !force_disable && dc_link_is_fec_supported(link);
|
||||
}
|
||||
|
||||
uint32_t dc_bandwidth_in_kbps_from_timing(
|
||||
const struct dc_crtc_timing *timing)
|
||||
{
|
||||
|
@ -4881,8 +4785,8 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
|
|||
for (i = 0; i < dc->caps.max_links; i++) {
|
||||
link = dc->links[i];
|
||||
if (link->link_status.link_active &&
|
||||
dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
|
||||
dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
|
||||
link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
|
||||
link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
|
||||
/* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability
|
||||
* but current link doesn't use it.
|
||||
*/
|
||||
|
@ -4925,7 +4829,7 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
|
|||
if ((hpo_dp_recycle_map & (1 << i)) == 0) {
|
||||
link = dc->links[i];
|
||||
if (link->type != dc_connection_none &&
|
||||
dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
|
||||
if (available_hpo_dp_count > 0)
|
||||
available_hpo_dp_count--;
|
||||
else
|
||||
|
@ -4939,7 +4843,7 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
|
|||
if ((hpo_dp_recycle_map & (1 << i)) != 0) {
|
||||
link = dc->links[i];
|
||||
if (link->type != dc_connection_none &&
|
||||
dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
|
||||
link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
|
||||
if (available_hpo_dp_count > 0)
|
||||
available_hpo_dp_count--;
|
||||
else
|
||||
|
|
File diff suppressed because it is too large
|
@ -25,6 +25,7 @@
|
|||
#include "link_enc_cfg.h"
|
||||
#include "resource.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "link.h"
|
||||
|
||||
#define DC_LOGGER dc->ctx->logger
|
||||
|
||||
|
@ -48,7 +49,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
|
|||
/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
|
||||
struct dc_link_settings link_settings = {0};
|
||||
|
||||
decide_link_settings(stream, &link_settings);
|
||||
link_decide_link_settings(stream, &link_settings);
|
||||
if ((link_settings.link_rate >= LINK_RATE_LOW) &&
|
||||
link_settings.link_rate <= LINK_RATE_HIGH3) {
|
||||
is_dig_stream = true;
|
||||
|
@ -305,15 +306,17 @@ void link_enc_cfg_link_encs_assign(
|
|||
for (i = 0; i < stream_count; i++) {
|
||||
struct dc_stream_state *stream = streams[i];
|
||||
|
||||
/* skip it if the link is mappable endpoint. */
|
||||
if (stream->link->is_dig_mapping_flexible)
|
||||
continue;
|
||||
|
||||
/* Skip stream if not supported by DIG link encoder. */
|
||||
if (!is_dig_link_enc_stream(stream))
|
||||
continue;
|
||||
|
||||
/* Physical endpoints have a fixed mapping to DIG link encoders. */
|
||||
if (!stream->link->is_dig_mapping_flexible) {
|
||||
eng_id = stream->link->eng_id;
|
||||
add_link_enc_assignment(state, stream, eng_id);
|
||||
}
|
||||
eng_id = stream->link->eng_id;
|
||||
add_link_enc_assignment(state, stream, eng_id);
|
||||
}
|
||||
|
||||
/* (b) Retain previous assignments for mappable endpoints if encoders still available. */
|
||||
|
@ -325,11 +328,12 @@ void link_enc_cfg_link_encs_assign(
|
|||
for (i = 0; i < stream_count; i++) {
|
||||
struct dc_stream_state *stream = state->streams[i];
|
||||
|
||||
/* Skip stream if not supported by DIG link encoder. */
|
||||
if (!is_dig_link_enc_stream(stream))
|
||||
/* Skip it if the link is NOT mappable endpoint. */
|
||||
if (!stream->link->is_dig_mapping_flexible)
|
||||
continue;
|
||||
|
||||
if (!stream->link->is_dig_mapping_flexible)
|
||||
/* Skip stream if not supported by DIG link encoder. */
|
||||
if (!is_dig_link_enc_stream(stream))
|
||||
continue;
|
||||
|
||||
for (j = 0; j < prev_state->stream_count; j++) {
|
||||
|
@ -338,6 +342,7 @@ void link_enc_cfg_link_encs_assign(
|
|||
if (stream == prev_stream && stream->link == prev_stream->link &&
|
||||
prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
|
||||
eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
|
||||
|
||||
if (is_avail_link_enc(state, eng_id, stream))
|
||||
add_link_enc_assignment(state, stream, eng_id);
|
||||
}
|
||||
|
@ -350,6 +355,15 @@ void link_enc_cfg_link_encs_assign(
|
|||
|
||||
for (i = 0; i < stream_count; i++) {
|
||||
struct dc_stream_state *stream = streams[i];
|
||||
struct link_encoder *link_enc = NULL;
|
||||
|
||||
/* Skip it if the link is NOT mappable endpoint. */
|
||||
if (!stream->link->is_dig_mapping_flexible)
|
||||
continue;
|
||||
|
||||
/* Skip if encoder assignment retained in step (b) above. */
|
||||
if (stream->link_enc)
|
||||
continue;
|
||||
|
||||
/* Skip stream if not supported by DIG link encoder. */
|
||||
if (!is_dig_link_enc_stream(stream)) {
|
||||
|
@ -358,24 +372,18 @@ void link_enc_cfg_link_encs_assign(
|
|||
}
|
||||
|
||||
/* Mappable endpoints have a flexible mapping to DIG link encoders. */
|
||||
if (stream->link->is_dig_mapping_flexible) {
|
||||
struct link_encoder *link_enc = NULL;
|
||||
|
||||
/* Skip if encoder assignment retained in step (b) above. */
|
||||
if (stream->link_enc)
|
||||
continue;
|
||||
/* For MST, multiple streams will share the same link / display
|
||||
* endpoint. These streams should use the same link encoder
|
||||
* assigned to that endpoint.
|
||||
*/
|
||||
link_enc = get_link_enc_used_by_link(state, stream->link);
|
||||
if (link_enc == NULL)
|
||||
eng_id = find_first_avail_link_enc(stream->ctx, state);
|
||||
else
|
||||
eng_id = link_enc->preferred_engine;
|
||||
|
||||
/* For MST, multiple streams will share the same link / display
|
||||
* endpoint. These streams should use the same link encoder
|
||||
* assigned to that endpoint.
|
||||
*/
|
||||
link_enc = get_link_enc_used_by_link(state, stream->link);
|
||||
if (link_enc == NULL)
|
||||
eng_id = find_first_avail_link_enc(stream->ctx, state);
|
||||
else
|
||||
eng_id = link_enc->preferred_engine;
|
||||
add_link_enc_assignment(state, stream, eng_id);
|
||||
}
|
||||
add_link_enc_assignment(state, stream, eng_id);
|
||||
}
|
||||
|
||||
link_enc_cfg_validate(dc, state);
|
||||
|
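Restated away from the driver types, the hunks above order the assignment in three passes: (a) physical endpoints keep their fixed DIG, (b) mappable endpoints keep their previous DIG when it is still free, (c) the remaining mappable endpoints take the first available one (the MST detail of sharing the encoder already used by the same link is omitted here). A toy, self-contained sketch under those assumptions, not the driver function:

	#include <stdio.h>

	#define NUM_STREAMS 3
	#define NUM_ENGINES 4
	#define UNASSIGNED (-1)

	/* Toy model only: fields stand in for the driver state referenced above. */
	struct toy_stream {
		int flexible;    /* link->is_dig_mapping_flexible */
		int fixed_eng;   /* link->eng_id for physical endpoints */
		int prev_eng;    /* previous assignment, UNASSIGNED if none */
		int assigned;
	};

	int main(void)
	{
		struct toy_stream s[NUM_STREAMS] = {
			{ 0, 1, UNASSIGNED, UNASSIGNED },          /* physical endpoint on DIG 1 */
			{ 1, UNASSIGNED, 2, UNASSIGNED },          /* mappable, previously on DIG 2 */
			{ 1, UNASSIGNED, UNASSIGNED, UNASSIGNED }, /* mappable, new stream */
		};
		int eng_free[NUM_ENGINES] = { 1, 1, 1, 1 };
		int i, e;

		/* (a) physical endpoints keep their fixed DIG */
		for (i = 0; i < NUM_STREAMS; i++)
			if (!s[i].flexible) {
				s[i].assigned = s[i].fixed_eng;
				eng_free[s[i].fixed_eng] = 0;
			}

		/* (b) mappable endpoints retain the previous DIG when still free */
		for (i = 0; i < NUM_STREAMS; i++)
			if (s[i].flexible && s[i].prev_eng != UNASSIGNED && eng_free[s[i].prev_eng]) {
				s[i].assigned = s[i].prev_eng;
				eng_free[s[i].prev_eng] = 0;
			}

		/* (c) leftovers take the first available DIG */
		for (i = 0; i < NUM_STREAMS; i++)
			for (e = 0; s[i].flexible && s[i].assigned == UNASSIGNED && e < NUM_ENGINES; e++)
				if (eng_free[e]) {
					s[i].assigned = e;
					eng_free[e] = 0;
				}

		for (i = 0; i < NUM_STREAMS; i++)
			printf("stream %d -> DIG %d\n", i, s[i].assigned);
		return 0;
	}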
@ -420,10 +428,6 @@ void link_enc_cfg_link_enc_unassign(
|
|||
{
|
||||
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
|
||||
|
||||
/* Only DIG link encoders. */
|
||||
if (!is_dig_link_enc_stream(stream))
|
||||
return;
|
||||
|
||||
if (stream->link_enc)
|
||||
eng_id = stream->link_enc->preferred_engine;
|
||||
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include "dpcd_defs.h"
|
||||
#include "link_enc_cfg.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "link.h"
|
||||
#include "virtual/virtual_link_hwss.h"
|
||||
#include "link/link_hwss_dio.h"
|
||||
#include "link/link_hwss_dpia.h"
|
||||
|
@ -2213,7 +2214,7 @@ enum dc_status dc_remove_stream_from_ctx(
|
|||
del_pipe->stream_res.stream_enc,
|
||||
false);
|
||||
|
||||
if (is_dp_128b_132b_signal(del_pipe)) {
|
||||
if (link_is_dp_128b_132b_signal(del_pipe)) {
|
||||
update_hpo_dp_stream_engine_usage(
|
||||
&new_ctx->res_ctx, dc->res_pool,
|
||||
del_pipe->stream_res.hpo_dp_stream_enc,
|
||||
|
@ -2513,9 +2514,9 @@ enum dc_status resource_map_pool_resources(
|
|||
* and link settings
|
||||
*/
|
||||
if (dc_is_dp_signal(stream->signal)) {
|
||||
if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
|
||||
if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
|
||||
return DC_FAIL_DP_LINK_BANDWIDTH;
|
||||
if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc =
|
||||
find_first_free_match_hpo_dp_stream_enc_for_link(
|
||||
&context->res_ctx, pool, stream);
|
||||
|
@ -3763,7 +3764,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
|
|||
|
||||
memset(link_res, 0, sizeof(*link_res));
|
||||
|
||||
if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
|
||||
link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
|
||||
dc->res_pool, link);
|
||||
if (!link_res->hpo_dp_link_enc)
|
||||
|
@ -3810,8 +3811,6 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
|
|||
int i;
|
||||
struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
|
||||
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
|
||||
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
|
||||
!IS_PIPE_SYNCD_VALID(pipe_ctx))
|
||||
|
@ -3822,15 +3821,19 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
|
|||
pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
|
||||
IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
|
||||
/* On dcn32, this error isn't fatal since hw supports odm transition in fast update*/
|
||||
if (dc->ctx->dce_version == DCN_VERSION_3_2 ||
|
||||
dc->ctx->dce_version == DCN_VERSION_3_21)
|
||||
DC_LOG_DEBUG("DC: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
|
||||
i, disabled_master_pipe_idx);
|
||||
else
|
||||
DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
|
||||
i, disabled_master_pipe_idx);
|
||||
IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
|
||||
struct pipe_ctx *first_pipe = pipe_ctx_check;
|
||||
|
||||
while (first_pipe->prev_odm_pipe)
|
||||
first_pipe = first_pipe->prev_odm_pipe;
|
||||
/* When ODM combine is enabled, this case is expected. If the disabled pipe
|
||||
* is part of the ODM tree, then we should not print an error.
|
||||
* */
|
||||
if (first_pipe->pipe_idx == disabled_master_pipe_idx)
|
||||
continue;
|
||||
|
||||
DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
|
||||
i, disabled_master_pipe_idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3995,7 +3998,7 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
|
|||
struct dc_state *context,
|
||||
struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
|
||||
if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc =
|
||||
find_first_free_match_hpo_dp_stream_enc_for_link(
|
||||
|
|
|
@ -47,7 +47,7 @@ struct aux_payload;
|
|||
struct set_config_cmd_payload;
|
||||
struct dmub_notification;
|
||||
|
||||
#define DC_VER "3.2.217"
|
||||
#define DC_VER "3.2.218"
|
||||
|
||||
#define MAX_SURFACES 3
|
||||
#define MAX_PLANES 6
|
||||
|
@ -873,6 +873,7 @@ struct dc_debug_options {
|
|||
unsigned int dsc_delay_factor_wa_x1000;
|
||||
unsigned int min_prefetch_in_strobe_ns;
|
||||
bool disable_unbounded_requesting;
|
||||
bool dig_fifo_off_in_blank;
|
||||
};
|
||||
|
||||
struct gpu_info_soc_bounding_box_v1_0;
|
||||
|
|
|
@ -140,7 +140,8 @@ struct dc_vbios_funcs {
|
|||
enum bp_result (*enable_lvtma_control)(
|
||||
struct dc_bios *bios,
|
||||
uint8_t uc_pwr_on,
|
||||
uint8_t panel_instance);
|
||||
uint8_t panel_instance,
|
||||
uint8_t bypass_panel_control_wait);
|
||||
|
||||
enum bp_result (*get_soc_bb_info)(
|
||||
struct dc_bios *dcb,
|
||||
|
|
|
@ -77,6 +77,32 @@ struct aux_reply_transaction_data {
|
|||
uint8_t *data;
|
||||
};
|
||||
|
||||
struct aux_payload {
|
||||
/* set following flag to read/write I2C data,
|
||||
* reset it to read/write DPCD data */
|
||||
bool i2c_over_aux;
|
||||
/* set following flag to write data,
|
||||
* reset it to read data */
|
||||
bool write;
|
||||
bool mot;
|
||||
bool write_status_update;
|
||||
|
||||
uint32_t address;
|
||||
uint32_t length;
|
||||
uint8_t *data;
|
||||
/*
|
||||
* used to return the reply type of the transaction
|
||||
* ignored if NULL
|
||||
*/
|
||||
uint8_t *reply;
|
||||
/* expressed in milliseconds
|
||||
* zero means "use default value"
|
||||
*/
|
||||
uint32_t defer_delay;
|
||||
|
||||
};
|
||||
#define DEFAULT_AUX_MAX_DATA_SIZE 16
|
||||
|
||||
struct i2c_payload {
|
||||
bool write;
|
||||
uint8_t address;
|
||||
|
@ -90,6 +116,8 @@ enum i2c_command_engine {
|
|||
I2C_COMMAND_ENGINE_HW
|
||||
};
|
||||
|
||||
#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
|
||||
|
||||
struct i2c_command {
|
||||
struct i2c_payload *payloads;
|
||||
uint8_t number_of_payloads;
|
||||
|
|
|
@ -361,14 +361,10 @@ enum dpcd_downstream_port_detailed_type {
|
|||
union dwnstream_port_caps_byte2 {
|
||||
struct {
|
||||
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;
|
||||
uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;
|
||||
uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;
|
||||
uint8_t RESERVED:1;
|
||||
#else
|
||||
uint8_t RESERVED:6;
|
||||
#endif
|
||||
} bits;
|
||||
uint8_t raw;
|
||||
};
|
||||
|
@ -406,7 +402,6 @@ union dwnstream_port_caps_byte3_hdmi {
|
|||
uint8_t raw;
|
||||
};
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN)
|
||||
union hdmi_sink_encoded_link_bw_support {
|
||||
struct {
|
||||
uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
|
||||
|
@ -428,7 +423,6 @@ union hdmi_encoded_link_bw {
|
|||
} bits;
|
||||
uint8_t raw;
|
||||
};
|
||||
#endif
|
||||
|
||||
/*4-byte structure for detailed capabilities of a down-stream port
|
||||
(DP-to-TMDS converter).*/
|
||||
|
@ -975,6 +969,9 @@ struct dpcd_usb4_dp_tunneling_info {
|
|||
/* TODO - Use DRM header to replace above once available */
|
||||
#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
|
||||
|
||||
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
|
||||
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
|
||||
#endif
|
||||
union dp_main_line_channel_coding_cap {
|
||||
struct {
|
||||
uint8_t DP_8b_10b_SUPPORTED :1;
|
||||
|
|
|
@ -0,0 +1,114 @@
|
|||
/*
|
||||
* Copyright 2022 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef DC_HDMI_TYPES_H
|
||||
#define DC_HDMI_TYPES_H
|
||||
|
||||
#include "os_types.h"
|
||||
|
||||
/* Address range from 0x00 to 0x1F.*/
|
||||
#define DP_ADAPTOR_TYPE2_SIZE 0x20
|
||||
#define DP_ADAPTOR_TYPE2_REG_ID 0x10
|
||||
#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
|
||||
/* Identifies adaptor as Dual-mode adaptor */
|
||||
#define DP_ADAPTOR_TYPE2_ID 0xA0
|
||||
/* MHz*/
|
||||
#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
|
||||
/* MHz*/
|
||||
#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
|
||||
/* kHZ*/
|
||||
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
|
||||
/* kHZ*/
|
||||
#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
|
||||
|
||||
struct dp_hdmi_dongle_signature_data {
|
||||
int8_t id[15];/* "DP-HDMI ADAPTOR"*/
|
||||
uint8_t eot;/* end of transmition '\x4' */
|
||||
};
|
||||
|
||||
/* DP-HDMI dongle slave address for retrieving dongle signature*/
|
||||
#define DP_HDMI_DONGLE_ADDRESS 0x40
|
||||
static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
|
||||
#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
|
||||
|
||||
|
||||
/* SCDC Address defines (HDMI 2.0)*/
|
||||
#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
|
||||
#define HDMI_SCDC_ADDRESS 0x54
|
||||
#define HDMI_SCDC_SINK_VERSION 0x01
|
||||
#define HDMI_SCDC_SOURCE_VERSION 0x02
|
||||
#define HDMI_SCDC_UPDATE_0 0x10
|
||||
#define HDMI_SCDC_TMDS_CONFIG 0x20
|
||||
#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
|
||||
#define HDMI_SCDC_CONFIG_0 0x30
|
||||
#define HDMI_SCDC_CONFIG_1 0x31
|
||||
#define HDMI_SCDC_SOURCE_TEST_REQ 0x35
|
||||
#define HDMI_SCDC_STATUS_FLAGS 0x40
|
||||
#define HDMI_SCDC_ERR_DETECT 0x50
|
||||
#define HDMI_SCDC_TEST_CONFIG 0xC0
|
||||
|
||||
union hdmi_scdc_update_read_data {
|
||||
uint8_t byte[2];
|
||||
struct {
|
||||
uint8_t STATUS_UPDATE:1;
|
||||
uint8_t CED_UPDATE:1;
|
||||
uint8_t RR_TEST:1;
|
||||
uint8_t RESERVED:5;
|
||||
uint8_t RESERVED2:8;
|
||||
} fields;
|
||||
};
|
||||
|
||||
union hdmi_scdc_status_flags_data {
|
||||
uint8_t byte;
|
||||
struct {
|
||||
uint8_t CLOCK_DETECTED:1;
|
||||
uint8_t CH0_LOCKED:1;
|
||||
uint8_t CH1_LOCKED:1;
|
||||
uint8_t CH2_LOCKED:1;
|
||||
uint8_t RESERVED:4;
|
||||
} fields;
|
||||
};
|
||||
|
||||
union hdmi_scdc_ced_data {
|
||||
uint8_t byte[11];
|
||||
struct {
|
||||
uint8_t CH0_8LOW:8;
|
||||
uint8_t CH0_7HIGH:7;
|
||||
uint8_t CH0_VALID:1;
|
||||
uint8_t CH1_8LOW:8;
|
||||
uint8_t CH1_7HIGH:7;
|
||||
uint8_t CH1_VALID:1;
|
||||
uint8_t CH2_8LOW:8;
|
||||
uint8_t CH2_7HIGH:7;
|
||||
uint8_t CH2_VALID:1;
|
||||
uint8_t CHECKSUM:8;
|
||||
uint8_t RESERVED:8;
|
||||
uint8_t RESERVED2:8;
|
||||
uint8_t RESERVED3:8;
|
||||
uint8_t RESERVED4:4;
|
||||
} fields;
|
||||
};
|
||||
|
||||
#endif /* DC_HDMI_TYPES_H */
|
|
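The SCDC offsets defined in this new header live in the I2C slave at HDMI_SCDC_ADDRESS (0x54). As an aside, a hedged sketch of how a source could poll the scrambler status with a generic register read; the i2c_read_byte() helper is hypothetical, only the register map comes from the header above, and the bit-0 meaning follows the HDMI 2.0 SCDC definition.

	#include <stdbool.h>
	#include <stdint.h>

	#define HDMI_SCDC_ADDRESS          0x54
	#define HDMI_SCDC_SCRAMBLER_STATUS 0x21

	/* Hypothetical helper: one-byte register read from a 7-bit I2C slave,
	 * returning 0 on success. Not part of the driver or this diff.
	 */
	int i2c_read_byte(uint8_t slave_addr, uint8_t reg, uint8_t *out);

	static bool sink_reports_scrambling(void)
	{
		uint8_t status = 0;

		if (i2c_read_byte(HDMI_SCDC_ADDRESS, HDMI_SCDC_SCRAMBLER_STATUS, &status) != 0)
			return false; /* treat an I2C failure as "not scrambling" in this sketch */
		return status & 0x01; /* Scrambling_Status bit */
	}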
@ -31,6 +31,7 @@
|
|||
#include "grph_object_defs.h"
|
||||
|
||||
struct link_resource;
|
||||
enum aux_return_code_type;
|
||||
|
||||
enum dc_link_fec_state {
|
||||
dc_link_fec_not_ready,
|
||||
|
@ -158,11 +159,11 @@ struct dc_panel_config {
|
|||
struct dc_dpia_bw_alloc {
|
||||
int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
|
||||
int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
|
||||
int padding_bw; // The Padding "Un-used" BW allocated by CM for padding reasons
|
||||
int sink_max_bw; // The Max BW that sink can require/support
|
||||
int estimated_bw; // The estimated available BW for this DPIA
|
||||
int bw_granularity; // BW Granularity
|
||||
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
|
||||
bool response_ready; // Response ready from the CM side
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -293,6 +294,8 @@ struct dc_link {
|
|||
|
||||
struct gpio *hpd_gpio;
|
||||
enum dc_link_fec_state fec_state;
|
||||
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
|
||||
|
||||
struct dc_panel_config panel_config;
|
||||
struct phy_state phy_state;
|
||||
};
|
||||
|
@ -338,14 +341,13 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
|
|||
int edp_num, i;
|
||||
|
||||
*inst_out = 0;
|
||||
if (link->connector_signal != SIGNAL_TYPE_EDP || !link->local_sink)
|
||||
if (link->connector_signal != SIGNAL_TYPE_EDP)
|
||||
return false;
|
||||
get_edp_links(dc, edp_links, &edp_num);
|
||||
for (i = 0; i < edp_num; i++) {
|
||||
if (link == edp_links[i])
|
||||
break;
|
||||
if (edp_links[i]->local_sink)
|
||||
(*inst_out)++;
|
||||
(*inst_out)++;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -459,31 +461,6 @@ void dc_link_dp_set_drive_settings(
|
|||
const struct link_resource *link_res,
|
||||
struct link_training_settings *lt_settings);
|
||||
|
||||
bool dc_link_dp_perform_link_training_skip_aux(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
const struct dc_link_settings *link_setting);
|
||||
|
||||
enum link_training_result dc_link_dp_perform_link_training(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
const struct dc_link_settings *link_settings,
|
||||
bool skip_video_pattern);
|
||||
|
||||
bool dc_link_dp_sync_lt_begin(struct dc_link *link);
|
||||
|
||||
enum link_training_result dc_link_dp_sync_lt_attempt(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
struct dc_link_settings *link_setting,
|
||||
struct dc_link_training_overrides *lt_settings);
|
||||
|
||||
bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down);
|
||||
|
||||
void dc_link_dp_enable_hpd(const struct dc_link *link);
|
||||
|
||||
void dc_link_dp_disable_hpd(const struct dc_link *link);
|
||||
|
||||
bool dc_link_dp_set_test_pattern(
|
||||
struct dc_link *link,
|
||||
enum dp_test_pattern test_pattern,
|
||||
|
@ -494,6 +471,21 @@ bool dc_link_dp_set_test_pattern(
|
|||
|
||||
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
|
||||
|
||||
/**
|
||||
*****************************************************************************
|
||||
* Function: dc_link_enable_hpd_filter
|
||||
*
|
||||
* @brief
|
||||
* If enable is true, programs HPD filter on associated HPD line to default
|
||||
* values dependent on link->connector_signal
|
||||
*
|
||||
* If enable is false, programs HPD filter on associated HPD line with no
|
||||
* delays on connect or disconnect
|
||||
*
|
||||
* @param [in] link: pointer to the dc link
|
||||
* @param [in] enable: boolean specifying whether to enable hbd
|
||||
*****************************************************************************
|
||||
*/
|
||||
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
|
||||
|
||||
bool dc_link_is_dp_sink_present(struct dc_link *link);
|
||||
|
@ -566,9 +558,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
|
|||
/* restore link resource allocation state from a snapshot */
|
||||
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
|
||||
void dc_link_clear_dprx_states(struct dc_link *link);
|
||||
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
|
||||
struct graphics_object_id link_id,
|
||||
struct gpio_service *gpio_service);
|
||||
void dp_trace_reset(struct dc_link *link);
|
||||
bool dc_dp_trace_is_initialized(struct dc_link *link);
|
||||
unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
|
||||
|
@@ -584,4 +573,20 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);

/* Destruct the mst topology of the link and reset the allocated payload table */
bool reset_cur_dp_mst_topology(struct dc_link *link);
+
+/* Attempt to transfer the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned in the payload->reply
+ * and the result through operation_result. Returns the number of bytes
+ * transferred,or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+		struct aux_payload *payload,
+		enum aux_return_code_type *operation_result);
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+		struct dc_link_settings *link_setting);
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+		struct dc_link_settings *link_setting,
+		uint32_t req_bw);
#endif /* DC_LINK_H_ */
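dc_link_aux_transfer_raw() takes a filled-in struct aux_payload (the struct added earlier in this diff) and reports the raw result without retries. A hedged usage sketch in kernel context, not a caller from the tree: the choice of DPCD address 0x600 and the handle_aux_failure() handler are illustrative assumptions.

	/* Illustrative caller only: read 1 byte of DPCD 0x600 through the raw
	 * AUX helper declared above. 'link' is an existing struct dc_link *.
	 */
	uint8_t val = 0;
	uint8_t reply = 0;
	enum aux_return_code_type op_result;
	struct aux_payload payload = {
		.i2c_over_aux = false,   /* native AUX, i.e. a DPCD access */
		.write = false,          /* read */
		.address = 0x600,        /* assumed DPCD register for the example */
		.length = 1,
		.data = &val,
		.reply = &reply,
		.defer_delay = 0,        /* zero means "use default value" per the struct comment */
	};
	int bytes = dc_link_aux_transfer_raw(link->ddc, &payload, &op_result);

	if (bytes < 0)
		handle_aux_failure(op_result); /* hypothetical: the helper itself never retries */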
@ -33,6 +33,7 @@
|
|||
#include "fixed31_32.h"
|
||||
#include "irq_types.h"
|
||||
#include "dc_dp_types.h"
|
||||
#include "dc_hdmi_types.h"
|
||||
#include "dc_hw_types.h"
|
||||
#include "dal_types.h"
|
||||
#include "grph_object_defs.h"
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
#ifndef __DAL_AUX_ENGINE_DCE110_H__
|
||||
#define __DAL_AUX_ENGINE_DCE110_H__
|
||||
|
||||
#include "i2caux_interface.h"
|
||||
#include "gpio_service_interface.h"
|
||||
#include "inc/hw/aux_engine.h"
|
||||
|
||||
enum aux_return_code_type;
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include "link_encoder.h"
|
||||
#include "dce_link_encoder.h"
|
||||
#include "stream_encoder.h"
|
||||
#include "i2caux_interface.h"
|
||||
#include "dc_bios_types.h"
|
||||
|
||||
#include "gpio_service_interface.h"
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
#include "link_encoder.h"
|
||||
#include "link_enc_cfg.h"
|
||||
#include "link_hwss.h"
|
||||
#include "link.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "dccg.h"
|
||||
#include "clock_source.h"
|
||||
|
@ -54,7 +55,6 @@
|
|||
#include "audio.h"
|
||||
#include "reg_helper.h"
|
||||
#include "panel_cntl.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
#include "dpcd_defs.h"
|
||||
/* include DCE11 register header files */
|
||||
#include "dce/dce_11_0_d.h"
|
||||
|
@ -737,7 +737,7 @@ void dce110_edp_wait_for_hpd_ready(
|
|||
|
||||
/* obtain HPD */
|
||||
/* TODO what to do with this? */
|
||||
hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
|
||||
hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
|
||||
|
||||
if (!hpd) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
|
@ -875,14 +875,16 @@ void dce110_edp_power_control(
|
|||
|
||||
if (ctx->dc->ctx->dmub_srv &&
|
||||
ctx->dc->debug.dmub_command_table) {
|
||||
if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
|
||||
|
||||
if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
|
||||
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
|
||||
LVTMA_CONTROL_POWER_ON,
|
||||
panel_instance);
|
||||
else
|
||||
panel_instance, link->link_powered_externally);
|
||||
} else {
|
||||
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
|
||||
LVTMA_CONTROL_POWER_OFF,
|
||||
panel_instance);
|
||||
panel_instance, link->link_powered_externally);
|
||||
}
|
||||
}
|
||||
|
||||
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
|
||||
|
@ -941,7 +943,6 @@ void dce110_edp_wait_for_T12(
|
|||
msleep(t12_duration - time_since_edp_poweroff_ms);
|
||||
}
|
||||
}
|
||||
|
||||
/*todo: cloned in stream enc, fix*/
|
||||
/*
|
||||
* @brief
|
||||
|
@ -1020,16 +1021,20 @@ void dce110_edp_backlight_control(
|
|||
DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
|
||||
}
|
||||
|
||||
/* Setting link_powered_externally will bypass delays in the backlight
|
||||
* as they are not required if the link is being powered by a different
|
||||
* source.
|
||||
*/
|
||||
if (ctx->dc->ctx->dmub_srv &&
|
||||
ctx->dc->debug.dmub_command_table) {
|
||||
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
|
||||
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
|
||||
LVTMA_CONTROL_LCD_BLON,
|
||||
panel_instance);
|
||||
panel_instance, link->link_powered_externally);
|
||||
else
|
||||
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
|
||||
LVTMA_CONTROL_LCD_BLOFF,
|
||||
panel_instance);
|
||||
panel_instance, link->link_powered_externally);
|
||||
}
|
||||
|
||||
link_transmitter_control(ctx->dc_bios, &cntl);
|
||||
|
@ -1154,7 +1159,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
pipe_ctx->stream_res.stream_enc);
|
||||
}
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc);
|
||||
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
|
||||
|
@ -1165,7 +1170,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
|
||||
link_hwss->reset_stream_encoder(pipe_ctx);
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
dto_params.otg_inst = tg->inst;
|
||||
dto_params.timing = &pipe_ctx->stream->timing;
|
||||
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
|
||||
|
@ -1174,7 +1179,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
|
|||
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
|
||||
}
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
/* TODO: This looks like a bug to me as we are disabling HPO IO when
|
||||
* we are just disabling a single HPO stream. Shouldn't we disable HPO
|
||||
* HW control only when HPOs for all streams are disabled?
|
||||
|
@ -1216,7 +1221,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
|
|||
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
|
||||
}
|
||||
|
||||
if (is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
|
||||
pipe_ctx->stream_res.hpo_dp_stream_enc);
|
||||
|
@ -1421,7 +1426,7 @@ static enum dc_status dce110_enable_stream_timing(
|
|||
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
|
||||
pipe_ctx->clock_source,
|
||||
&pipe_ctx->stream_res.pix_clk_params,
|
||||
dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
|
||||
link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
|
||||
&pipe_ctx->pll_settings)) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
|
@ -1525,7 +1530,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
|
|||
* To do so, move calling function enable_stream_timing to only be done AFTER calling
|
||||
* function core_link_enable_stream
|
||||
*/
|
||||
if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
|
||||
if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)))
|
||||
/* */
|
||||
/* Do not touch stream timing on seamless boot optimization. */
|
||||
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
|
||||
|
@ -1567,7 +1572,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
|
|||
* To do so, move calling function enable_stream_timing to only be done AFTER calling
|
||||
* function core_link_enable_stream
|
||||
*/
|
||||
if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) {
|
||||
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
|
||||
hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
|
||||
}
|
||||
|
@ -3047,13 +3052,13 @@ void dce110_enable_dp_link_output(
|
|||
pipes[i].clock_source->funcs->program_pix_clk(
|
||||
pipes[i].clock_source,
|
||||
&pipes[i].stream_res.pix_clk_params,
|
||||
dp_get_link_encoding_format(link_settings),
|
||||
link_dp_get_encoding_format(link_settings),
|
||||
&pipes[i].pll_settings);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
|
||||
if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
|
||||
if (dc->clk_mgr->funcs->notify_link_rate_change)
|
||||
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
|
||||
}
|
||||
|
|
|
@ -71,7 +71,7 @@ void dce110_optimize_bandwidth(
|
|||
struct dc *dc,
|
||||
struct dc_state *context);
|
||||
|
||||
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
|
||||
void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
|
||||
|
||||
void dce110_edp_power_control(
|
||||
struct dc_link *link,
|
||||
|
|
|
@ -172,6 +172,10 @@ struct dcn_hubbub_registers {
|
|||
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;
|
||||
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;
|
||||
uint32_t SDPIF_REQUEST_RATE_LIMIT;
|
||||
uint32_t DCHUBBUB_SDPIF_CFG0;
|
||||
uint32_t DCHUBBUB_SDPIF_CFG1;
|
||||
uint32_t DCHUBBUB_CLOCK_CNTL;
|
||||
uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL;
|
||||
};
|
||||
|
||||
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
|
||||
|
@ -362,7 +366,13 @@ struct dcn_hubbub_registers {
|
|||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\
|
||||
type SDPIF_REQUEST_RATE_LIMIT
|
||||
type SDPIF_REQUEST_RATE_LIMIT;\
|
||||
type DISPCLK_R_DCHUBBUB_GATE_DIS;\
|
||||
type DCFCLK_R_DCHUBBUB_GATE_DIS;\
|
||||
type SDPIF_MAX_NUM_OUTSTANDING;\
|
||||
type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\
|
||||
type SDPIF_PORT_CONTROL;\
|
||||
type DET_MEM_PWR_LS_MODE
|
||||
|
||||
|
||||
struct dcn_hubbub_shift {
|
||||
|
|
|
@ -57,7 +57,7 @@
|
|||
#include "dc_trace.h"
|
||||
#include "dce/dmub_outbox.h"
|
||||
#include "inc/dc_link_dp.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
#include "link.h"
|
||||
|
||||
#define DC_LOGGER_INIT(logger)
|
||||
|
||||
|
@ -921,7 +921,7 @@ enum dc_status dcn10_enable_stream_timing(
|
|||
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
|
||||
pipe_ctx->clock_source,
|
||||
&pipe_ctx->stream_res.pix_clk_params,
|
||||
dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
|
||||
link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
|
||||
&pipe_ctx->pll_settings)) {
|
||||
BREAK_TO_DEBUGGER();
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include "link_encoder.h"
|
||||
#include "dcn10_link_encoder.h"
|
||||
#include "stream_encoder.h"
|
||||
#include "i2caux_interface.h"
|
||||
#include "dc_bios_types.h"
|
||||
|
||||
#include "gpio_service_interface.h"
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#include "dcn10_stream_encoder.h"
|
||||
#include "reg_helper.h"
|
||||
#include "hw_shared.h"
|
||||
#include "inc/link_dpcd.h"
|
||||
#include "dc_link_dp.h"
|
||||
#include "dpcd_defs.h"
|
||||
#include "dcn30/dcn30_afmt.h"
|
||||
|
||||
|
|
|
@@ -52,10 +52,10 @@
 #include "dc_dmub_srv.h"
 #include "dce/dmub_hw_lock_mgr.h"
 #include "hw_sequencer.h"
-#include "inc/link_dpcd.h"
 #include "dpcd_defs.h"
 #include "inc/link_enc_cfg.h"
 #include "link_hwss.h"
+#include "link.h"

 #define DC_LOGGER_INIT(logger)

@@ -712,7 +712,7 @@ enum dc_status dcn20_enable_stream_timing(
     if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
             pipe_ctx->clock_source,
             &pipe_ctx->stream_res.pix_clk_params,
-            dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+            link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
             &pipe_ctx->pll_settings)) {
         BREAK_TO_DEBUGGER();
         return DC_ERROR_UNEXPECTED;

@@ -2383,7 +2383,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,

     params.link_settings.link_rate = link_settings->link_rate;

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
         pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
             pipe_ctx->stream_res.hpo_dp_stream_enc,

@@ -2691,12 +2691,12 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
     unsigned int k1_div = PIXEL_RATE_DIV_NA;
     unsigned int k2_div = PIXEL_RATE_DIV_NA;

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         if (dc->hwseq->funcs.setup_hpo_hw_control)
             dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
     }

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
         dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
@@ -29,7 +29,6 @@
 #include "link_encoder.h"
 #include "dcn20_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"

 #include "gpio_service_interface.h"
@@ -62,7 +62,6 @@
 #include "dml/display_mode_vba.h"
 #include "dcn20_dccg.h"
 #include "dcn20_vmid.h"
-#include "dc_link_ddc.h"
 #include "dce/dce_panel_cntl.h"

 #include "navi10_ip_offset.h"

@@ -90,6 +89,7 @@

 #include "amdgpu_socbb.h"

+#include "link.h"
 #define DC_LOGGER_INIT(logger)

 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL

@@ -1214,7 +1214,7 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
     dcn20_pp_smu_destroy(&pool->base.pp_smu);

     if (pool->base.oem_device != NULL)
-        dal_ddc_service_destroy(&pool->base.oem_device);
+        link_destroy_ddc_service(&pool->base.oem_device);
 }

 struct hubp *dcn20_hubp_create(

@@ -2769,7 +2769,7 @@ static bool dcn20_resource_construct(
         ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
         ddc_init_data.id.enum_id = 0;
         ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
-        pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+        pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
     } else {
         pool->base.oem_device = NULL;
     }
@@ -29,7 +29,7 @@
 #include "dcn20_stream_encoder.h"
 #include "reg_helper.h"
 #include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "dc_link_dp.h"
 #include "dpcd_defs.h"

 #define DC_LOGGER \
@@ -29,7 +29,6 @@
 #include "link_encoder.h"
 #include "dcn201_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"

 #include "gpio_service_interface.h"
@@ -31,7 +31,6 @@
 #include "dcn21_link_encoder.h"
 #include "stream_encoder.h"

-#include "i2caux_interface.h"
 #include "dc_bios_types.h"

 #include "gpio_service_interface.h"
@@ -29,7 +29,6 @@
 #include "link_encoder.h"
 #include "dcn30_dio_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"
 /* #include "dcn3ag/dcn3ag_phy_fw.h" */
@@ -51,7 +51,6 @@
 #include "../dcn20/dcn20_hwseq.h"
 #include "dcn30_resource.h"
-#include "inc/dc_link_dp.h"
 #include "inc/link_dpcd.h"
@@ -60,7 +60,7 @@
 #include "dml/display_mode_vba.h"
 #include "dcn30/dcn30_dccg.h"
 #include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
 #include "dce/dce_panel_cntl.h"

 #include "dcn30/dcn30_dwb.h"

@@ -1208,7 +1208,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
     dcn_dccg_destroy(&pool->base.dccg);

     if (pool->base.oem_device != NULL)
-        dal_ddc_service_destroy(&pool->base.oem_device);
+        link_destroy_ddc_service(&pool->base.oem_device);
 }

 static struct hubp *dcn30_hubp_create(

@@ -2590,7 +2590,7 @@ static bool dcn30_resource_construct(
         ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
         ddc_init_data.id.enum_id = 0;
         ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
-        pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+        pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
     } else {
         pool->base.oem_device = NULL;
     }
@@ -29,7 +29,6 @@
 #include "link_encoder.h"
 #include "dcn301_dio_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"
 #include "gpio_service_interface.h"
@@ -47,6 +47,7 @@

 #include "dcn10/dcn10_resource.h"

+#include "link.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_audio.h"
 #include "dce/dce_aux.h"

@@ -1125,6 +1126,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)

     if (pool->dccg != NULL)
         dcn_dccg_destroy(&pool->dccg);
+
+    if (pool->oem_device != NULL)
+        link_destroy_ddc_service(&pool->oem_device);
 }

 static void dcn302_destroy_resource_pool(struct resource_pool **pool)

@@ -1216,6 +1220,7 @@ static bool dcn302_resource_construct(
     int i;
     struct dc_context *ctx = dc->ctx;
     struct irq_service_init_data init_data;
+    struct ddc_service_init_data ddc_init_data = {0};

     ctx->dc_bios->regs = &bios_regs;

@@ -1497,6 +1502,17 @@ static bool dcn302_resource_construct(

     dc->cap_funcs = cap_funcs;

+    if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+        ddc_init_data.ctx = dc->ctx;
+        ddc_init_data.link = NULL;
+        ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+        ddc_init_data.id.enum_id = 0;
+        ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+        pool->oem_device = link_create_ddc_service(&ddc_init_data);
+    } else {
+        pool->oem_device = NULL;
+    }
+
     return true;

 create_fail:
@@ -29,7 +29,7 @@

 #include "dcn10/dcn10_resource.h"

-#include "dc_link_ddc.h"
+#include "link.h"

 #include "dce/dce_abm.h"
 #include "dce/dce_audio.h"

@@ -1054,7 +1054,7 @@ static void dcn303_resource_destruct(struct resource_pool *pool)
     dcn_dccg_destroy(&pool->dccg);

     if (pool->oem_device != NULL)
-        dal_ddc_service_destroy(&pool->oem_device);
+        link_destroy_ddc_service(&pool->oem_device);
 }

 static void dcn303_destroy_resource_pool(struct resource_pool **pool)

@@ -1421,7 +1421,7 @@ static bool dcn303_resource_construct(
         ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
         ddc_init_data.id.enum_id = 0;
         ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
-        pool->oem_device = dal_ddc_service_create(&ddc_init_data);
+        pool->oem_device = link_create_ddc_service(&ddc_init_data);
     } else {
         pool->oem_device = NULL;
     }
@@ -30,7 +30,6 @@
 #include "link_encoder.h"
 #include "dcn31_dio_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"

 #include "gpio_service_interface.h"
@@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
     return false;
 }

+void hubbub31_init(struct hubbub *hubbub)
+{
+    struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+    /*Enable clock gate*/
+    if (hubbub->ctx->dc->debug.disable_clock_gate) {
+        /*done in hwseq*/
+        /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+        REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+                DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+                DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+    }
+
+    /*
+    only the DCN will determine when to connect the SDP port
+    */
+    REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1);
+}
 static const struct hubbub_funcs hubbub31_funcs = {
     .update_dchub = hubbub2_update_dchub,
     .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@@ -42,6 +42,10 @@
     SR(DCHUBBUB_COMPBUF_CTRL),\
     SR(COMPBUF_RESERVED_SPACE),\
     SR(DCHUBBUB_DEBUG_CTRL_0),\
+    SR(DCHUBBUB_CLOCK_CNTL),\
+    SR(DCHUBBUB_SDPIF_CFG0),\
+    SR(DCHUBBUB_SDPIF_CFG1),\
+    SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\
     SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\
     SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\
     SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\

@@ -120,7 +124,11 @@
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
-    HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+    HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)

 int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
         struct dcn_hubbub_phys_addr_config *pa_config);
@@ -46,7 +46,7 @@
 #include "dpcd_defs.h"
 #include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
 #include "inc/link_dpcd.h"
+#include "link.h"
 #include "dcn10/dcn10_hw_sequencer.h"
 #include "inc/link_enc_cfg.h"
 #include "dcn30/dcn30_vpg.h"

@@ -415,7 +415,12 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
         pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
             pipe_ctx->stream_res.stream_enc,
             &pipe_ctx->stream_res.encoder_info_frame);
-    else {
+    else if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+        pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
+            pipe_ctx->stream_res.hpo_dp_stream_enc,
+            &pipe_ctx->stream_res.encoder_info_frame);
+        return;
+    } else {
         pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
             pipe_ctx->stream_res.stream_enc,
             &pipe_ctx->stream_res.encoder_info_frame);
@@ -30,7 +30,7 @@
 #include "dcn314_dio_stream_encoder.h"
 #include "reg_helper.h"
 #include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "dc_link_dp.h"
 #include "dpcd_defs.h"

 #define DC_LOGGER \

@@ -281,7 +281,8 @@ static void enc314_stream_encoder_dp_blank(
     enc1_stream_encoder_dp_blank(link, enc);

     /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
-    enc314_disable_fifo(enc);
+    if (enc->ctx->dc->debug.dig_fifo_off_in_blank)
+        enc314_disable_fifo(enc);
 }

 static void enc314_stream_encoder_dp_unblank(
@@ -47,8 +47,8 @@
 #include "dpcd_defs.h"
 #include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
+#include "link.h"
 #include "inc/dc_link_dp.h"
 #include "inc/link_dpcd.h"
 #include "dcn10/dcn10_hw_sequencer.h"
 #include "inc/link_enc_cfg.h"
 #include "dcn30/dcn30_vpg.h"

@@ -348,7 +348,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
     two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
     odm_combine_factor = get_odm_config(pipe_ctx, NULL);

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         *k1_div = PIXEL_RATE_DIV_BY_1;
         *k2_div = PIXEL_RATE_DIV_BY_1;
     } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -1776,7 +1776,7 @@ static bool dcn316_resource_construct(
     pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
     dc->caps.max_downscale_ratio = 600;
     dc->caps.i2c_speed_in_khz = 100;
-    dc->caps.i2c_speed_in_khz_hdcp = 100;
+    dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
     dc->caps.max_cursor_size = 256;
     dc->caps.min_horizontal_blanking_period = 80;
     dc->caps.dmdata_alloc_size = 2048;
@@ -31,7 +31,6 @@
 #include "dcn31/dcn31_dio_link_encoder.h"
 #include "dcn32_dio_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"
 #include "link_enc_cfg.h"
@@ -29,7 +29,7 @@
 #include "dcn32_dio_stream_encoder.h"
 #include "reg_helper.h"
 #include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "dc_link_dp.h"
 #include "dpcd_defs.h"

 #define DC_LOGGER \

@@ -421,6 +421,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi
     REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);
 }

+static void enc32_reset_fifo(struct stream_encoder *enc, bool reset)
+{
+    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+    uint32_t reset_val = reset ? 1 : 0;
+    uint32_t is_symclk_on;
+
+    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
+    REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
+
+    if (is_symclk_on)
+        REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
+    else
+        udelay(10);
+}
+
+static void enc32_enable_fifo(struct stream_encoder *enc)
+{
+    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
+    enc32_reset_fifo(enc, true);
+    enc32_reset_fifo(enc, false);
+
+    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
+}
+
 static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
     .dp_set_odm_combine =
         enc32_dp_set_odm_combine,

@@ -466,6 +493,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
     .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,

     .set_input_mode = enc32_set_dig_input_mode,
+    .enable_fifo = enc32_enable_fifo,
 };

 void dcn32_dio_stream_encoder_construct(
@@ -945,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
         DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 }

+void hubbub32_init(struct hubbub *hubbub)
+{
+    struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+    /* Enable clock gate*/
+    if (hubbub->ctx->dc->debug.disable_clock_gate) {
+        /*done in hwseq*/
+        /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+
+        REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+                DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+                DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+    }
+    /*
+    ignore the "df_pre_cstate_req" from the SDP port control.
+    only the DCN will determine when to connect the SDP port
+    */
+    REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+            SDPIF_PORT_CONTROL, 1);
+    /*Set SDP's max outstanding request to 512
+    must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
+    REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
+            SDPIF_MAX_NUM_OUTSTANDING, 1);
+    /*must set the registers back to 256 in zero frame buffer mode*/
+    REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+            DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
+            DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
+}
+
 static const struct hubbub_funcs hubbub32_funcs = {
     .update_dchub = hubbub2_update_dchub,
     .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -83,7 +83,12 @@
     SR(DCN_VM_FAULT_ADDR_LSB),\
     SR(DCN_VM_FAULT_CNTL),\
     SR(DCN_VM_FAULT_STATUS),\
-    SR(SDPIF_REQUEST_RATE_LIMIT)
+    SR(SDPIF_REQUEST_RATE_LIMIT),\
+    SR(DCHUBBUB_CLOCK_CNTL),\
+    SR(DCHUBBUB_SDPIF_CFG0),\
+    SR(DCHUBBUB_SDPIF_CFG1),\
+    SR(DCHUBBUB_MEM_PWR_MODE_CTRL)

 #define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
     HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \

@@ -96,6 +101,7 @@
     HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
     HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
     HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+    HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \
     HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
     HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
     HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \

@@ -161,7 +167,14 @@
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
     HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
-    HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh)
+    HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\
+    HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)

 bool hubbub32_program_urgent_watermarks(
     struct hubbub *hubbub,
@@ -155,7 +155,11 @@ void hubp32_cursor_set_attributes(
     else
         REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
 }

+void hubp32_init(struct hubp *hubp)
+{
+    struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+    REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+}
 static struct hubp_funcs dcn32_hubp_funcs = {
     .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
     .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -50,6 +50,7 @@
 #include "dmub_subvp_state.h"
 #include "dce/dmub_hw_lock_mgr.h"
 #include "dcn32_resource.h"
+#include "link.h"
 #include "dc_link_dp.h"
 #include "dmub/inc/dmub_subvp_state.h"

@@ -207,151 +208,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
  */
 static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
 {
-    int i, j;
-    struct dc_stream_state *stream = NULL;
-    struct dc_plane_state *plane = NULL;
-    uint32_t cursor_size = 0;
-    uint32_t total_lines = 0;
-    uint32_t lines_per_way = 0;
+    int i;
     uint8_t num_ways = 0;
-    uint8_t bytes_per_pixel = 0;
-    uint8_t cursor_bpp = 0;
-    uint16_t mblk_width = 0;
-    uint16_t mblk_height = 0;
-    uint16_t mall_alloc_width_blk_aligned = 0;
-    uint16_t mall_alloc_height_blk_aligned = 0;
-    uint16_t num_mblks = 0;
-    uint32_t bytes_in_mall = 0;
-    uint32_t cache_lines_used = 0;
-    uint32_t cache_lines_per_plane = 0;
+    uint32_t mall_ss_size_bytes = 0;

-    for (i = 0; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
-        /* If PSR is supported on an eDP panel that's connected, but that panel is
-         * not in PSR at the time of trying to enter MALL SS, we have to include it
-         * in the static screen CAB calculation
-         */
-        if (!pipe->stream || !pipe->plane_state ||
-                (pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-                pipe->stream->link->psr_settings.psr_allow_active) ||
-                pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
-            continue;
-
-        bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
-        mblk_width = DCN3_2_MBLK_WIDTH;
-        mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
-
-        /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
-         * FLOOR(vp_x_start, blk_width)
-         *
-         * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
-         */
-        mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
-                pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
-                (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
-        /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
-         * FLOOR(vp_y_start, blk_height)
-         *
-         * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
-         */
-        mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
-                pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
-                (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
-        num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
-                ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
-        /*For DCC:
-         * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
-         */
-        if (pipe->plane_state->dcc.enable)
-            num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
-                    (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
-        bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
-
-        /* (cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
-         * (MALL is 64-byte aligned)
-         */
-        cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
-        cache_lines_used += cache_lines_per_plane;
-    }
+    mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+    // TODO add additional logic for PSR active stream exclusion optimization
+    // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;

     // Include cursor size for CAB allocation
-    for (j = 0; j < dc->res_pool->pipe_count; j++) {
-        struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
-        struct hubp *hubp = pipe->plane_res.hubp;
-
-        if (pipe->stream && pipe->plane_state && hubp)
-            /* Find the cursor plane and use the exact size instead of
-            using the max for calculation */
-
-        if (hubp->curs_attr.width > 0) {
-            cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
-
-            switch (pipe->stream->cursor_attributes.color_format) {
-            case CURSOR_MODE_MONO:
-                cursor_size /= 2;
-                cursor_bpp = 4;
-                break;
-            case CURSOR_MODE_COLOR_1BIT_AND:
-            case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
-            case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
-                cursor_size *= 4;
-                cursor_bpp = 4;
-                break;
-
-            case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
-            case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
-                cursor_size *= 8;
-                cursor_bpp = 8;
-                break;
-            }
-
-            if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
-                    cursor_size > 16384) {
-                /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
-                 */
-                cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
-                        DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
-                        dc->caps.cache_line_size + 2;
-                break;
-            }
-        }
+    for (i = 0; i < dc->res_pool->pipe_count; i++) {
+        struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
+
+        if (!pipe->stream || !pipe->plane_state)
+            continue;
+
+        mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
     }

     // Convert number of cache lines required to number of ways
-    total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
-    lines_per_way = total_lines / dc->caps.cache_num_ways;
-    num_ways = cache_lines_used / lines_per_way;
-
-    if (cache_lines_used % lines_per_way > 0)
-        num_ways++;
-
-    for (i = 0; i < ctx->stream_count; i++) {
-        stream = ctx->streams[i];
-        for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
-            plane = ctx->stream_status[i].plane_states[j];
-
-            if (stream->cursor_position.enable && plane &&
-                    dc->debug.alloc_extra_way_for_cursor &&
-                    cursor_size > 16384) {
-                /* Cursor caching is not supported since it won't be on the same line.
-                 * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
-                 * this case triggers corruption. If we're at the edge, then dont trigger display refresh
-                 * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
-                 */
-                num_ways++;
-                /* We only expect one cursor plane */
-                break;
-            }
-        }
-    }
     if (dc->debug.force_mall_ss_num_ways > 0) {
         num_ways = dc->debug.force_mall_ss_num_ways;
+    } else {
+        num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
     }

     return num_ways;
 }
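Note on the rewritten CAB sizing above: dcn32_calculate_cab_allocation() no longer walks every plane to count MALL blocks itself; it takes the mall_ss_size_bytes figure already produced by DML, adds the per-pipe cursor allocation, and converts bytes to cache ways through dcn32_helper_mall_bytes_to_ways(). That conversion is plain integer arithmetic, shown below as a standalone, hedged sketch that compiles outside the kernel; the function name and the cache geometry (64-byte lines, a 32 MiB CAB split into 16 ways) are illustrative assumptions, not values taken from any ASIC table.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bytes-to-ways conversion mirroring dcn32_helper_mall_bytes_to_ways(). */
static uint32_t mall_bytes_to_ways(uint32_t total_bytes, uint32_t cache_line_size,
                                   uint32_t max_cab_bytes, uint32_t cache_num_ways)
{
        /* +2 cache lines of slack for worst-case alignment, as in the driver */
        uint32_t cache_lines_used = total_bytes / cache_line_size + 2;
        uint32_t total_cache_lines = max_cab_bytes / cache_line_size;
        uint32_t lines_per_way = total_cache_lines / cache_num_ways;
        uint32_t num_ways = cache_lines_used / lines_per_way;

        if (cache_lines_used % lines_per_way > 0)
                num_ways++; /* round up to a whole way */
        return num_ways;
}

int main(void)
{
        /* An 8 MiB static-screen surface against the assumed 32 MiB, 16-way cache:
         * 131074 lines used / 32768 lines per way -> 4 remainder 2 -> 5 ways.
         */
        printf("ways needed: %u\n",
               mall_bytes_to_ways(8u * 1024 * 1024, 64u, 32u * 1024 * 1024, 16u));
        return 0;
}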
@@ -804,6 +685,25 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
     }
 }

+static void dcn32_initialize_min_clocks(struct dc *dc)
+{
+    struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
+
+    clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
+    clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
+    clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
+    clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
+    clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
+    clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+    clocks->fclk_p_state_change_support = true;
+    clocks->p_state_change_support = true;
+
+    dc->clk_mgr->funcs->update_clocks(
+            dc->clk_mgr,
+            dc->current_state,
+            true);
+}
+
 void dcn32_init_hw(struct dc *dc)
 {
     struct abm **abms = dc->res_pool->multiple_abms;

@@ -898,6 +798,8 @@ void dcn32_init_hw(struct dc *dc)
         if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
             dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
                     !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+
+        dcn32_initialize_min_clocks(dc);
     }

     /* In headless boot cases, DIG may be turned

@@ -1176,7 +1078,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
     two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
     odm_combine_factor = get_odm_config(pipe_ctx, NULL);

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         *k1_div = PIXEL_RATE_DIV_BY_1;
         *k2_div = PIXEL_RATE_DIV_BY_1;
     } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {

@@ -1240,7 +1142,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,

     params.link_settings.link_rate = link_settings->link_rate;

-    if (is_dp_128b_132b_signal(pipe_ctx)) {
+    if (link_is_dp_128b_132b_signal(pipe_ctx)) {
         /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
         pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
             pipe_ctx->stream_res.hpo_dp_stream_enc,

@@ -1267,7 +1169,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
     if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
         return false;

-    if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+    if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) &&
         dc->debug.enable_dp_dig_pixel_rate_div_policy)
         return true;
     return false;

@@ -1301,7 +1203,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
             pipe_ctx->clock_source->funcs->program_pix_clk(
                 pipe_ctx->clock_source,
                 &pipe_ctx->stream_res.pix_clk_params,
-                dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+                link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
                 &pipe_ctx->pll_settings);
             link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
             break;
@@ -69,7 +69,7 @@
 #include "dml/display_mode_vba.h"
 #include "dcn32/dcn32_dccg.h"
 #include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
 #include "dcn31/dcn31_panel_cntl.h"

 #include "dcn30/dcn30_dwb.h"

@@ -1508,7 +1508,7 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
     dcn_dccg_destroy(&pool->base.dccg);

     if (pool->base.oem_device != NULL)
-        dal_ddc_service_destroy(&pool->base.oem_device);
+        link_destroy_ddc_service(&pool->base.oem_device);
 }

@@ -2450,7 +2450,7 @@ static bool dcn32_resource_construct(
         ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
         ddc_init_data.id.enum_id = 0;
         ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
-        pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+        pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
     } else {
         pool->base.oem_device = NULL;
     }
@@ -96,8 +96,17 @@ void dcn32_calculate_wm_and_dlg(
     int pipe_cnt,
     int vlevel);

-uint32_t dcn32_helper_calculate_num_ways_for_subvp
-        (struct dc *dc,
+uint32_t dcn32_helper_mall_bytes_to_ways(
+        struct dc *dc,
+        uint32_t total_size_in_mall_bytes);
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+        struct dc *dc,
+        struct pipe_ctx *pipe_ctx,
+        bool ignore_cursor_buf);
+
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+        struct dc *dc,
         struct dc_state *context);

 void dcn32_merge_pipes_for_subvp(struct dc *dc,

@@ -135,6 +144,8 @@ void dcn32_restore_mall_state(struct dc *dc,
     struct dc_state *context,
     struct mall_temp_config *temp_config);

+bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
+
 /* definitions for run time init of reg offsets */

 /* CLK SRC */
@@ -33,13 +33,75 @@ static bool is_dual_plane(enum surface_pixel_format format)
     return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
 }

+uint32_t dcn32_helper_mall_bytes_to_ways(
+        struct dc *dc,
+        uint32_t total_size_in_mall_bytes)
+{
+    uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+    /* add 2 lines for worst case alignment */
+    cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+    total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+    lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
+    num_ways = cache_lines_used / lines_per_way;
+    if (cache_lines_used % lines_per_way > 0)
+        num_ways++;
+
+    return num_ways;
+}
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+        struct dc *dc,
+        struct pipe_ctx *pipe_ctx,
+        bool ignore_cursor_buf)
+{
+    struct hubp *hubp = pipe_ctx->plane_res.hubp;
+    uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+    uint32_t cursor_bpp = 4;
+    uint32_t cursor_mall_size_bytes = 0;
+
+    switch (pipe_ctx->stream->cursor_attributes.color_format) {
+    case CURSOR_MODE_MONO:
+        cursor_size /= 2;
+        cursor_bpp = 4;
+        break;
+    case CURSOR_MODE_COLOR_1BIT_AND:
+    case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+    case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+        cursor_size *= 4;
+        cursor_bpp = 4;
+        break;
+
+    case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+    case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+        cursor_size *= 8;
+        cursor_bpp = 8;
+        break;
+    }
+
+    /* only count if cursor is enabled, and if additional allocation needed outside of the
+     * DCN cursor buffer
+     */
+    if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
+            cursor_size > 16384)) {
+        /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
+         * Note: add 1 mblk in case of cursor misalignment
+         */
+        cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+                DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
+    }
+
+    return cursor_mall_size_bytes;
+}
+
 /**
  * ********************************************************************************************
  * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
  *
- * This function first checks the bytes required per pixel on the SubVP pipe, then calculates
- * the total number of pixels required in the SubVP MALL region. These are used to calculate
- * the number of cache lines used (then number of ways required) for SubVP MCLK switching.
+ * Gets total allocation required for the phantom viewport calculated by DML in bytes and
+ * converts to number of cache ways.
  *
  * @param [in] dc: current dc state
  * @param [in] context: new dc state
@@ -48,106 +110,19 @@ static bool is_dual_plane(enum surface_pixel_format format)
  *
  * ********************************************************************************************
  */
-uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+        struct dc *dc,
+        struct dc_state *context)
 {
-    uint32_t num_ways = 0;
-    uint32_t bytes_per_pixel = 0;
-    uint32_t cache_lines_used = 0;
-    uint32_t lines_per_way = 0;
-    uint32_t total_cache_lines = 0;
-    uint32_t bytes_in_mall = 0;
-    uint32_t num_mblks = 0;
-    uint32_t cache_lines_per_plane = 0;
-    uint32_t i = 0, j = 0;
-    uint16_t mblk_width = 0;
-    uint16_t mblk_height = 0;
-    uint32_t full_vp_width_blk_aligned = 0;
-    uint32_t full_vp_height_blk_aligned = 0;
-    uint32_t mall_alloc_width_blk_aligned = 0;
-    uint32_t mall_alloc_height_blk_aligned = 0;
-    uint16_t full_vp_height = 0;
-    bool subvp_in_use = false;
-
-    for (i = 0; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-        /* Find the phantom pipes.
-         * - For pipe split case we need to loop through the bottom and next ODM
-         *   pipes or only half the viewport size is counted
-         */
-        if (pipe->stream && pipe->plane_state &&
-                pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-            struct pipe_ctx *main_pipe = NULL;
-
-            subvp_in_use = true;
-            /* Get full viewport height from main pipe (required for MBLK calculation) */
-            for (j = 0; j < dc->res_pool->pipe_count; j++) {
-                main_pipe = &context->res_ctx.pipe_ctx[j];
-                if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
-                    full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
-                    break;
-                }
-            }
-
-            bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
-            mblk_width = DCN3_2_MBLK_WIDTH;
-            mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
-
-            /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
-             * FLOOR(vp_x_start, blk_width)
-             */
-            full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
-                    pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
-                    (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
-            /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
-             * FLOOR(vp_y_start, blk_height)
-             */
-            full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
-                    full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
-                    (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
-            /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
-            mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
-
-            /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
-            mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
-                    mblk_height * mblk_height + mblk_height;
-
-            /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
-             * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
-             * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
-             * (Should be divisible, but round up if not)
-             */
-            num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
-                    ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
-            /*For DCC:
-             * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
-             */
-            if (pipe->plane_state->dcc.enable)
-                num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
-                        (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
-            bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
-            // cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
-            // (MALL is 64-byte aligned)
-            cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
-
-            cache_lines_used += cache_lines_per_plane;
-        }
-    }
-
-    total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
-    lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
-    num_ways = cache_lines_used / lines_per_way;
-    if (cache_lines_used % lines_per_way > 0)
-        num_ways++;
-
-    if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
-        num_ways = dc->debug.force_subvp_num_ways;
-
-    return num_ways;
+    if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
+        if (dc->debug.force_subvp_num_ways) {
+            return dc->debug.force_subvp_num_ways;
+        } else {
+            return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
+        }
+    } else {
+        return 0;
+    }
 }

 void dcn32_merge_pipes_for_subvp(struct dc *dc,
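The cursor helper added earlier in this file, dcn32_helper_calculate_mall_bytes_for_cursor(), only reserves MALL space when the cursor is enabled and its scaled size exceeds the 16 KiB on-chip cursor buffer (unless the caller asks to ignore that buffer), and it then rounds the allocation up to whole MALL blocks plus one spare block for misalignment. The standalone sketch below reproduces just that rounding; the function name, parameters, and the 64 KiB block size standing in for DCN3_2_MALL_MBLK_SIZE_BYTES are illustrative assumptions, not claims about the hardware.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MBLK_BYTES   65536u  /* assumed stand-in for DCN3_2_MALL_MBLK_SIZE_BYTES */
#define CURSOR_BUFFER_BYTES  16384u  /* threshold used by the driver check */

/* Round a cursor allocation up to whole MALL blocks plus one block of slack. */
static uint32_t cursor_mall_bytes(uint32_t pitch, uint32_t height,
                                  uint32_t bytes_per_element, int cursor_enabled)
{
        uint32_t cursor_size = pitch * height * bytes_per_element;

        if (!cursor_enabled || cursor_size <= CURSOR_BUFFER_BYTES)
                return 0; /* fits the dedicated cursor buffer, no MALL space needed */

        return ((cursor_size + EXAMPLE_MBLK_BYTES - 1) / EXAMPLE_MBLK_BYTES + 1) *
                EXAMPLE_MBLK_BYTES;
}

int main(void)
{
        /* 256x256 at 4 bytes/pixel = 262144 bytes -> 4 blocks + 1 spare = 327680 */
        printf("%u\n", cursor_mall_bytes(256, 256, 4, 1));
        /* 64x64 at 4 bytes/pixel = 16384 bytes -> fits the cursor buffer, prints 0 */
        printf("%u\n", cursor_mall_bytes(64, 64, 4, 1));
        return 0;
}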
@@ -265,6 +240,14 @@ bool dcn32_is_center_timing(struct pipe_ctx *pipe)
             is_center_timing = true;
         }
     }
+
+    if (pipe->plane_state) {
+        if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
+                pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
+            is_center_timing = true;
+        }
+    }
+
     return is_center_timing;
 }
@@ -31,7 +31,6 @@
 #include "dcn321_dio_link_encoder.h"
 #include "dcn31/dcn31_dio_link_encoder.h"
 #include "stream_encoder.h"
-#include "i2caux_interface.h"
 #include "dc_bios_types.h"

 #include "gpio_service_interface.h"