Merge branch 'drm-next-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-next
Updates for 5.1:
- GDS fixes
- Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES interface
- GPUVM fixes
- PCIE DPM switching fixes for vega20
- Vega10 uclk DPM regression fix
- DC Freesync fixes
- DC ABM fixes
- Various DC cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190208210214.27666-1-alexander.deucher@amd.com
commit f4bc54b532
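The new chunk ID reuses the submission plumbing of the existing dependencies chunk. A minimal userspace sketch follows (hypothetical; it assumes the payload is the same struct drm_amdgpu_cs_chunk_dep array used by AMDGPU_CHUNK_ID_DEPENDENCIES, which the kernel hunks below are consistent with; ctx_id and seq_no are placeholders):

    /* Sketch: make a CS wait only until the referenced job is *scheduled*,
     * not until it has finished. Names from the amdgpu UAPI headers. */
    struct drm_amdgpu_cs_chunk_dep dep = {
        .ip_type     = AMDGPU_HW_IP_GFX,
        .ip_instance = 0,
        .ring        = 0,
        .ctx_id      = ctx_id,  /* context that produced the dependency */
        .handle      = seq_no,  /* fence sequence number in that context */
    };
    struct drm_amdgpu_cs_chunk chunk = {
        .chunk_id   = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES,
        .length_dw  = sizeof(dep) / 4,
        .chunk_data = (uintptr_t)&dep,
    };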
@@ -214,6 +214,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
    case AMDGPU_CHUNK_ID_DEPENDENCIES:
    case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
    case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
    case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
        break;

    default:

@@ -1090,6 +1091,15 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,

        fence = amdgpu_ctx_get_fence(ctx, entity,
                                     deps[i].handle);

        if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
            struct dma_fence *old = fence;

            fence = dma_fence_get(&s_fence->scheduled);
            dma_fence_put(old);
        }

        if (IS_ERR(fence)) {
            r = PTR_ERR(fence);
            amdgpu_ctx_put(ctx);

@@ -1177,7 +1187,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,

        chunk = &p->chunks[i];

        if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
        if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
            chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
            r = amdgpu_cs_process_fence_dep(p, chunk);
            if (r)
                return r;
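Background for the hunk above (not part of the diff): a drm_sched_fence exposes two dma_fences, "scheduled", which signals when the scheduler picks the job for its ring, and "finished", which signals when the job completes. Swapping one for the other means the submission only serializes against the dependency starting, not finishing. A hypothetical standalone helper with the same effect:

    /* Hypothetical helper mirroring the hunk above; assumes "fence" came
     * from amdgpu_ctx_get_fence() and is a scheduler fence. */
    static struct dma_fence *pick_scheduled_fence(struct dma_fence *fence)
    {
        struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
        struct dma_fence *scheduled = dma_fence_get(&s_fence->scheduled);

        dma_fence_put(fence);   /* drop the reference to the finished fence */
        return scheduled;
    }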
@@ -3618,6 +3618,38 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
    return r;
}

static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
                                                  enum pci_bus_speed *speed,
                                                  enum pcie_link_width *width)
{
    struct pci_dev *pdev = adev->pdev;
    enum pci_bus_speed cur_speed;
    enum pcie_link_width cur_width;

    *speed = PCI_SPEED_UNKNOWN;
    *width = PCIE_LNK_WIDTH_UNKNOWN;

    while (pdev) {
        cur_speed = pcie_get_speed_cap(pdev);
        cur_width = pcie_get_width_cap(pdev);

        if (cur_speed != PCI_SPEED_UNKNOWN) {
            if (*speed == PCI_SPEED_UNKNOWN)
                *speed = cur_speed;
            else if (cur_speed < *speed)
                *speed = cur_speed;
        }

        if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
            if (*width == PCIE_LNK_WIDTH_UNKNOWN)
                *width = cur_width;
            else if (cur_width < *width)
                *width = cur_width;
        }
        pdev = pci_upstream_bridge(pdev);
    }
}

/**
 * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
 *

@@ -3630,8 +3662,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
    struct pci_dev *pdev;
    enum pci_bus_speed speed_cap;
    enum pcie_link_width link_width;
    enum pci_bus_speed speed_cap, platform_speed_cap;
    enum pcie_link_width platform_link_width;

    if (amdgpu_pcie_gen_cap)
        adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

@@ -3648,6 +3680,12 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
        return;
    }

    if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
        return;

    amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
                                          &platform_link_width);

    if (adev->pm.pcie_gen_mask == 0) {
        /* asic caps */
        pdev = adev->pdev;

@@ -3673,22 +3711,20 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
            adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
        }
        /* platform caps */
        pdev = adev->ddev->pdev->bus->self;
        speed_cap = pcie_get_speed_cap(pdev);
        if (speed_cap == PCI_SPEED_UNKNOWN) {
        if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
            adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
        } else {
            if (speed_cap == PCIE_SPEED_16_0GT)
            if (platform_speed_cap == PCIE_SPEED_16_0GT)
                adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
            else if (speed_cap == PCIE_SPEED_8_0GT)
            else if (platform_speed_cap == PCIE_SPEED_8_0GT)
                adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
            else if (speed_cap == PCIE_SPEED_5_0GT)
            else if (platform_speed_cap == PCIE_SPEED_5_0GT)
                adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                           CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
            else

@@ -3697,12 +3733,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
        }
    }
    if (adev->pm.pcie_mlw_mask == 0) {
        pdev = adev->ddev->pdev->bus->self;
        link_width = pcie_get_width_cap(pdev);
        if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
        if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
            adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
        } else {
            switch (link_width) {
            switch (platform_link_width) {
            case PCIE_LNK_X32:
                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
@@ -71,9 +71,11 @@
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 */
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 27
#define KMS_DRIVER_MINOR 29
#define KMS_DRIVER_PATCHLEVEL 0

int amdgpu_vram_limit = 0;
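Userspace would normally gate use of the two new interfaces on the bumped KMS minor. A hedged sketch using libdrm (drmGetVersion/drmFreeVersion are real libdrm calls; the 28/29 mapping follows the version history above):

    #include <stdbool.h>
    #include <xf86drm.h>

    /* Sketch: probe the amdgpu KMS version before using the new UAPI. */
    static void probe_amdgpu_features(int fd, bool *has_sched_deps,
                                      bool *has_gds_wave_reset)
    {
        drmVersionPtr ver = drmGetVersion(fd);

        *has_sched_deps     = ver->version_major == 3 && ver->version_minor >= 28;
        *has_gds_wave_reset = ver->version_major == 3 && ver->version_minor >= 29;
        drmFreeVersion(ver);
    }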
@@ -37,6 +37,8 @@ struct amdgpu_gds {
    struct amdgpu_gds_asic_info mem;
    struct amdgpu_gds_asic_info gws;
    struct amdgpu_gds_asic_info oa;
    uint32_t gds_compute_max_wave_id;

    /* At present, GDS, GWS and OA resources for gfx (graphics)
     * is always pre-allocated and available for graphics operation.
     * Such resource is shared between all gfx clients.
@@ -54,10 +54,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,

    memset(&bp, 0, sizeof(bp));
    *obj = NULL;
    /* At least align on page size */
    if (alignment < PAGE_SIZE) {
        alignment = PAGE_SIZE;
    }

    bp.size = size;
    bp.byte_align = alignment;

@@ -244,9 +240,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
            return -EINVAL;
        }
        flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        /* GDS allocations must be DW aligned */
        if (args->in.domains & AMDGPU_GEM_DOMAIN_GDS)
            size = ALIGN(size, 4);
    }

    if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
@@ -426,12 +426,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
    size_t acc_size;
    int r;

    page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
    if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
                      AMDGPU_GEM_DOMAIN_OA))
    /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
    if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
        /* GWS and OA don't need any alignment. */
        page_align = bp->byte_align;
        size <<= PAGE_SHIFT;
    else
    } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
        /* Both size and alignment must be a multiple of 4. */
        page_align = ALIGN(bp->byte_align, 4);
        size = ALIGN(size, 4) << PAGE_SHIFT;
    } else {
        /* Memory should be aligned at least to a page size. */
        page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);
    }

    if (!amdgpu_bo_validate_size(adev, size, bp->domain))
        return -ENOMEM;
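Worked example for the new GDS branch above (illustrative numbers only): a request of size = 6 bytes with byte_align = 1 gives

    page_align = ALIGN(1, 4) = 4
    size       = ALIGN(6, 4) << PAGE_SHIFT = 8 << PAGE_SHIFT

so TTM accounts 8 "pages", one per GDS byte, matching the "1 page per byte/resource" convention stated in the comment.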
@@ -1756,7 +1756,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
    }

    r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
                                PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
                                4, AMDGPU_GEM_DOMAIN_GDS,
                                &adev->gds.gds_gfx_bo, NULL, NULL);
    if (r)
        return r;

@@ -1769,7 +1769,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
    }

    r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
                                PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
                                1, AMDGPU_GEM_DOMAIN_GWS,
                                &adev->gds.gws_gfx_bo, NULL, NULL);
    if (r)
        return r;

@@ -1782,7 +1782,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
    }

    r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
                                PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
                                1, AMDGPU_GEM_DOMAIN_OA,
                                &adev->gds.oa_gfx_bo, NULL, NULL);
    if (r)
        return r;
@@ -107,14 +107,6 @@ struct amdgpu_pte_update_params {
     * DMA addresses to use for mapping, used during VM update by CPU
     */
    dma_addr_t *pages_addr;

    /**
     * @kptr:
     *
     * Kernel pointer of PD/PT BO that needs to be updated,
     * used during VM update by CPU
     */
    void *kptr;
};

/**

@@ -1789,13 +1781,20 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (pages_addr)
            params.src = ~0;

        /* Wait for PT BOs to be free. PTs share the same resv. object
        /* Wait for PT BOs to be idle. PTs share the same resv. object
         * as the root PD BO
         */
        r = amdgpu_vm_wait_pd(adev, vm, owner);
        if (unlikely(r))
            return r;

        /* Wait for any BO move to be completed */
        if (exclusive) {
            r = dma_fence_wait(exclusive, true);
            if (unlikely(r))
                return r;
        }

        params.func = amdgpu_vm_cpu_set_ptes;
        params.pages_addr = pages_addr;
        return amdgpu_vm_update_ptes(&params, start, last + 1,

@@ -1809,13 +1808,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
    /*
     * reserve space for two commands every (1 << BLOCK_SIZE)
     * entries or 2k dwords (whatever is smaller)
     *
     * The second command is for the shadow pagetables.
     */
    ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);

    /* The second command is for the shadow pagetables. */
    if (vm->root.base.bo->shadow)
        ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
    else
        ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
        ncmds *= 2;

    /* padding, etc. */
    ndw = 64;

@@ -1834,10 +1832,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
    ndw += ncmds * 10;

    /* extra commands for begin/end fragments */
    ncmds = 2 * adev->vm_manager.fragment_size;
    if (vm->root.base.bo->shadow)
        ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
    else
        ndw += 2 * 10 * adev->vm_manager.fragment_size;
        ncmds *= 2;

    ndw += 10 * ncmds;

    params.func = amdgpu_vm_do_set_ptes;
}
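Worked example for the reworked command-count math above (illustrative values): with vm_manager.block_size = 9 and nptes = 1024,

    ncmds = (1024 >> 9) + 1 = 3
    with a shadow root PD:    ncmds *= 2  ->  6
    without a shadow root PD: ncmds stays 3

whereas the old code always reserved the doubled amount. The same shadow test now also guards the doubling of the begin/end fragment commands.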
@@ -125,7 +125,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
    if (!hive) {
        ret = -EINVAL;
        dev_err(adev->dev,
            "XGMI: node 0x%llx, can not matech hive 0x%llx in the hive list.\n",
            "XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
            adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
        goto exit;
    }
@@ -2264,6 +2264,22 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
    unsigned vmid = AMDGPU_JOB_GET_VMID(job);
    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

    /* Currently, there is a high possibility to get wave ID mismatch
     * between ME and GDS, leading to a hw deadlock, because ME generates
     * different wave IDs than the GDS expects. This situation happens
     * randomly when at least 5 compute pipes use GDS ordered append.
     * The wave IDs generated by ME are also wrong after suspend/resume.
     * Those are probably bugs somewhere else in the kernel driver.
     *
     * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
     * GDS to 0 for this ring (me/pipe).
     */
    if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
        amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
    amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN

@@ -5000,7 +5016,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        7 + /* gfx_v7_0_ring_emit_pipeline_sync */
        CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
        7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
    .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
    .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
    .emit_ib = gfx_v7_0_ring_emit_ib_compute,
    .emit_fence = gfx_v7_0_ring_emit_fence_compute,
    .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,

@@ -5057,6 +5073,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
    adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
    adev->gds.gws.total_size = 64;
    adev->gds.oa.total_size = 16;
    adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);

    if (adev->gds.mem.total_size == 64 * 1024) {
        adev->gds.mem.gfx_partition_size = 4096;
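On the userspace side the reset is requested per IB via the new flag. A hedged sketch (struct and field names from the amdgpu UAPI; ib_va and ib_size_bytes are placeholders):

    /* Sketch: ask the kernel to reset the ME/GDS wave ID counters before
     * this IB runs, e.g. when the IB uses GDS ordered append. */
    struct drm_amdgpu_cs_chunk_ib ib_info = {
        .flags    = AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID,
        .va_start = ib_va,
        .ib_bytes = ib_size_bytes,
        .ip_type  = AMDGPU_HW_IP_COMPUTE,
    };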
@@ -6084,6 +6084,22 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
    unsigned vmid = AMDGPU_JOB_GET_VMID(job);
    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

    /* Currently, there is a high possibility to get wave ID mismatch
     * between ME and GDS, leading to a hw deadlock, because ME generates
     * different wave IDs than the GDS expects. This situation happens
     * randomly when at least 5 compute pipes use GDS ordered append.
     * The wave IDs generated by ME are also wrong after suspend/resume.
     * Those are probably bugs somewhere else in the kernel driver.
     *
     * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
     * GDS to 0 for this ring (me/pipe).
     */
    if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
        amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
    amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN

@@ -6890,7 +6906,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        7 + /* gfx_v8_0_ring_emit_pipeline_sync */
        VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
        7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
    .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
    .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
    .emit_ib = gfx_v8_0_ring_emit_ib_compute,
    .emit_fence = gfx_v8_0_ring_emit_fence_compute,
    .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,

@@ -6920,7 +6936,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
        7 + /* gfx_v8_0_ring_emit_pipeline_sync */
        17 + /* gfx_v8_0_ring_emit_vm_flush */
        7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
    .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
    .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
    .test_ring = gfx_v8_0_ring_test_ring,
    .insert_nop = amdgpu_ring_insert_nop,

@@ -6996,6 +7012,7 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
    adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
    adev->gds.gws.total_size = 64;
    adev->gds.oa.total_size = 16;
    adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);

    if (adev->gds.mem.total_size == 64 * 1024) {
        adev->gds.mem.gfx_partition_size = 4096;
@@ -4010,6 +4010,22 @@ static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
    unsigned vmid = AMDGPU_JOB_GET_VMID(job);
    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

    /* Currently, there is a high possibility to get wave ID mismatch
     * between ME and GDS, leading to a hw deadlock, because ME generates
     * different wave IDs than the GDS expects. This situation happens
     * randomly when at least 5 compute pipes use GDS ordered append.
     * The wave IDs generated by ME are also wrong after suspend/resume.
     * Those are probably bugs somewhere else in the kernel driver.
     *
     * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
     * GDS to 0 for this ring (me/pipe).
     */
    if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
        amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
    BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
    amdgpu_ring_write(ring,

@@ -4729,7 +4745,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
        2 + /* gfx_v9_0_ring_emit_vm_flush */
        8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
    .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
    .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
    .emit_ib = gfx_v9_0_ring_emit_ib_compute,
    .emit_fence = gfx_v9_0_ring_emit_fence,
    .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,

@@ -4764,7 +4780,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
        2 + /* gfx_v9_0_ring_emit_vm_flush */
        8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
    .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
    .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
    .test_ring = gfx_v9_0_ring_test_ring,
    .insert_nop = amdgpu_ring_insert_nop,

@@ -4846,6 +4862,26 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
        break;
    }

    switch (adev->asic_type) {
    case CHIP_VEGA10:
    case CHIP_VEGA20:
        adev->gds.gds_compute_max_wave_id = 0x7ff;
        break;
    case CHIP_VEGA12:
        adev->gds.gds_compute_max_wave_id = 0x27f;
        break;
    case CHIP_RAVEN:
        if (adev->rev_id >= 0x8)
            adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
        else
            adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
        break;
    default:
        /* this really depends on the chip */
        adev->gds.gds_compute_max_wave_id = 0x7ff;
        break;
    }

    adev->gds.gws.total_size = 64;
    adev->gds.oa.total_size = 16;
@@ -2297,12 +2297,15 @@ static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
                                      uint64_t info)
{
    struct dc *dc = adev->dm.dc;
    struct dc_dcc_surface_param input = {0};
    struct dc_surface_dcc_cap output = {0};
    struct dc_dcc_surface_param input;
    struct dc_surface_dcc_cap output;
    uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
    uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
    uint64_t dcc_address;

    memset(&input, 0, sizeof(input));
    memset(&output, 0, sizeof(output));

    if (!offset)
        return false;

@@ -2956,11 +2959,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
    drm_connector = &aconnector->base;

    if (!aconnector->dc_sink) {
        if (!aconnector->mst_port) {
            sink = create_fake_sink(aconnector);
            if (!sink)
                return stream;
        }
        sink = create_fake_sink(aconnector);
        if (!sink)
            return stream;
    } else {
        sink = aconnector->dc_sink;
    }

@@ -3027,9 +3028,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

    update_stream_signal(stream, sink);

    if (dm_state && dm_state->freesync_capable)
        stream->ignore_msa_timing_param = true;

finish:
    if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
        dc_sink_release(sink);

@@ -4610,12 +4608,6 @@ static void update_freesync_state_on_stream(
            new_crtc_state->base.crtc->base.id,
            (int)new_crtc_state->base.vrr_enabled,
            (int)vrr_params.state);

    if (new_crtc_state->freesync_timing_changed)
        DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
                      new_crtc_state->base.crtc->base.id,
                      vrr_params.adjust.v_total_min,
                      vrr_params.adjust.v_total_max);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,

@@ -4639,7 +4631,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
    unsigned long flags;
    struct amdgpu_bo *abo;
    uint64_t tiling_flags, dcc_address;
    struct dc_stream_status *stream_status;
    uint32_t target, target_vblank;

    struct {

@@ -4670,7 +4661,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        struct drm_framebuffer *fb = new_plane_state->fb;
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
        bool pflip_needed;
        struct dc_plane_state *surface, *dc_plane;
        struct dc_plane_state *dc_plane;
        struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

        if (plane->type == DRM_PLANE_TYPE_CURSOR) {

@@ -4733,39 +4724,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        timestamp_ns = ktime_get_ns();
        flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
        flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count];
        flip->surface_updates[flip_count].surface = dc_plane;

        stream_status = dc_stream_get_status(acrtc_state->stream);
        if (!stream_status) {
            DRM_ERROR("No stream status for CRTC: id=%d\n",
                      acrtc_attach->crtc_id);
            continue;
        }

        surface = stream_status->plane_states[0];
        flip->surface_updates[flip_count].surface = surface;
        if (!flip->surface_updates[flip_count].surface) {
            DRM_ERROR("No surface for CRTC: id=%d\n",
                      acrtc_attach->crtc_id);
            continue;
        }

        if (acrtc_state->stream)
        if (plane == pcrtc->primary)
            update_freesync_state_on_stream(
                dm,
                acrtc_state,
                acrtc_state->stream,
                surface,
                dc_plane,
                flip->flip_addrs[flip_count].flip_timestamp_in_us);

        /* Update surface timing information. */
        surface->time.time_elapsed_in_us[surface->time.index] =
            flip->flip_addrs[flip_count].flip_timestamp_in_us -
            surface->time.prev_update_time_in_us;
        surface->time.prev_update_time_in_us = flip->flip_addrs[flip_count].flip_timestamp_in_us;
        surface->time.index++;
        if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
            surface->time.index = 0;

        DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
                         __func__,
                         flip->flip_addrs[flip_count].address.grph.addr.high_part,

@@ -4902,7 +4876,8 @@ cleanup:
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
    stream_state->mode_changed = crtc_state->mode_changed;
    stream_state->mode_changed =
        crtc_state->mode_changed || crtc_state->active_changed;
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,

@@ -5094,10 +5069,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
        struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
        struct dc_surface_update dummy_updates[MAX_SURFACES] = { 0 };
        struct dc_stream_update stream_update = { 0 };
        struct dc_surface_update dummy_updates[MAX_SURFACES];
        struct dc_stream_update stream_update;
        struct dc_stream_status *status = NULL;

        memset(&dummy_updates, 0, sizeof(dummy_updates));
        memset(&stream_update, 0, sizeof(stream_update));

        if (acrtc) {
            new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
            old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);

@@ -5174,9 +5152,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)

            manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
            /* The stream has changed so CRC capture needs to re-enabled. */
            if (dm_new_crtc_state->crc_enabled)
                amdgpu_dm_crtc_set_crc_source(crtc, "auto");
#endif
        }

    /* update planes when needed per crtc*/

@@ -5372,10 +5352,13 @@ static void get_freesync_config_for_crtc(
    struct mod_freesync_config config = {0};
    struct amdgpu_dm_connector *aconnector =
        to_amdgpu_dm_connector(new_con_state->base.connector);
    struct drm_display_mode *mode = &new_crtc_state->base.mode;

    new_crtc_state->vrr_supported = new_con_state->freesync_capable;
    new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
        aconnector->min_vfreq <= drm_mode_vrefresh(mode);

    if (new_con_state->freesync_capable) {
    if (new_crtc_state->vrr_supported) {
        new_crtc_state->stream->ignore_msa_timing_param = true;
        config.state = new_crtc_state->base.vrr_enabled ?
            VRR_STATE_ACTIVE_VARIABLE :
            VRR_STATE_INACTIVE;

@@ -5783,7 +5766,6 @@ dm_determine_update_type_for_commit(struct dc *dc,

    struct dc_surface_update *updates;
    struct dc_plane_state *surface;
    struct dc_stream_update stream_update;
    enum surface_update_type update_type = UPDATE_TYPE_FAST;

    updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);

@@ -5797,79 +5779,85 @@ dm_determine_update_type_for_commit(struct dc *dc,
    }

    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
        struct dc_stream_update stream_update = { 0 };

        new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
        old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
        num_plane = 0;

        if (new_dm_crtc_state->stream) {

        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
            new_plane_crtc = new_plane_state->crtc;
            old_plane_crtc = old_plane_state->crtc;
            new_dm_plane_state = to_dm_plane_state(new_plane_state);
            old_dm_plane_state = to_dm_plane_state(old_plane_state);

            if (plane->type == DRM_PLANE_TYPE_CURSOR)
                continue;

            if (!state->allow_modeset)
                continue;

            if (crtc == new_plane_crtc) {
                updates[num_plane].surface = &surface[num_plane];

                if (new_crtc_state->mode_changed) {
                    updates[num_plane].surface->src_rect =
                        new_dm_plane_state->dc_state->src_rect;
                    updates[num_plane].surface->dst_rect =
                        new_dm_plane_state->dc_state->dst_rect;
                    updates[num_plane].surface->rotation =
                        new_dm_plane_state->dc_state->rotation;
                    updates[num_plane].surface->in_transfer_func =
                        new_dm_plane_state->dc_state->in_transfer_func;
                    stream_update.dst = new_dm_crtc_state->stream->dst;
                    stream_update.src = new_dm_crtc_state->stream->src;
                }

                if (new_crtc_state->color_mgmt_changed) {
                    updates[num_plane].gamma =
                        new_dm_plane_state->dc_state->gamma_correction;
                    updates[num_plane].in_transfer_func =
                        new_dm_plane_state->dc_state->in_transfer_func;
                    stream_update.gamut_remap =
                        &new_dm_crtc_state->stream->gamut_remap_matrix;
                    stream_update.out_transfer_func =
                        new_dm_crtc_state->stream->out_transfer_func;
                }

                num_plane++;
            }
        if (!new_dm_crtc_state->stream) {
        if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
            update_type = UPDATE_TYPE_FULL;
            goto cleanup;
        }

        if (num_plane > 0) {
            ret = dm_atomic_get_state(state, &dm_state);
            if (ret)
                goto cleanup;
            continue;
        }

        old_dm_state = dm_atomic_get_old_state(state);
        if (!old_dm_state) {
            ret = -EINVAL;
            goto cleanup;
        }
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
            new_plane_crtc = new_plane_state->crtc;
            old_plane_crtc = old_plane_state->crtc;
            new_dm_plane_state = to_dm_plane_state(new_plane_state);
            old_dm_plane_state = to_dm_plane_state(old_plane_state);

            status = dc_stream_get_status_from_state(old_dm_state->context,
                                                     new_dm_crtc_state->stream);
            if (plane->type == DRM_PLANE_TYPE_CURSOR)
                continue;

            update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
                                                              &stream_update, status);
            if (!state->allow_modeset)
                continue;

            if (update_type > UPDATE_TYPE_MED) {
                update_type = UPDATE_TYPE_FULL;
                goto cleanup;
            }
            if (crtc != new_plane_crtc)
                continue;

            updates[num_plane].surface = &surface[num_plane];

            if (new_crtc_state->mode_changed) {
                updates[num_plane].surface->src_rect =
                    new_dm_plane_state->dc_state->src_rect;
                updates[num_plane].surface->dst_rect =
                    new_dm_plane_state->dc_state->dst_rect;
                updates[num_plane].surface->rotation =
                    new_dm_plane_state->dc_state->rotation;
                updates[num_plane].surface->in_transfer_func =
                    new_dm_plane_state->dc_state->in_transfer_func;
                stream_update.dst = new_dm_crtc_state->stream->dst;
                stream_update.src = new_dm_crtc_state->stream->src;
            }

        } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
            if (new_crtc_state->color_mgmt_changed) {
                updates[num_plane].gamma =
                    new_dm_plane_state->dc_state->gamma_correction;
                updates[num_plane].in_transfer_func =
                    new_dm_plane_state->dc_state->in_transfer_func;
                stream_update.gamut_remap =
                    &new_dm_crtc_state->stream->gamut_remap_matrix;
                stream_update.out_transfer_func =
                    new_dm_crtc_state->stream->out_transfer_func;
            }

            num_plane++;
        }

        if (num_plane == 0)
            continue;

        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
            goto cleanup;

        old_dm_state = dm_atomic_get_old_state(state);
        if (!old_dm_state) {
            ret = -EINVAL;
            goto cleanup;
        }

        status = dc_stream_get_status_from_state(old_dm_state->context,
                                                 new_dm_crtc_state->stream);

        update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
                                                          &stream_update, status);

        if (update_type > UPDATE_TYPE_MED) {
            update_type = UPDATE_TYPE_FULL;
            goto cleanup;
        }
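For readers skimming the rewritten loop above: surface_update_type is ordered UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL, and the function promotes anything above MED to a full update before bailing out. A hypothetical one-line condensation of that promotion:

    /* Hypothetical helper equivalent to the bail-out in the hunk above. */
    static enum surface_update_type clamp_to_full(enum surface_update_type t)
    {
        return t > UPDATE_TYPE_MED ? UPDATE_TYPE_FULL : t;
    }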
@@ -174,6 +174,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
        aconnector->edid = edid;
    }

    if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
        dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;
    }

    if (!aconnector->dc_sink) {
        struct dc_sink *dc_sink;
        struct dc_sink_init_data init_params = {
@@ -63,7 +63,7 @@ void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
    if (v->interlace_output[k] == 1.0) {
        v->v_ratio[k] = 2.0 * v->v_ratio[k];
    }
    if ((v->underscan_output[k] == 1.0)) {
    if (v->underscan_output[k] == 1.0) {
        v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
        v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
    }
@@ -621,7 +621,6 @@ static bool construct(struct dc *dc,
#endif

    enum dce_version dc_version = DCE_VERSION_UNKNOWN;

    dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
    if (!dc_dceip) {
        dm_error("%s: failed to create dceip\n", __func__);

@@ -869,8 +868,9 @@ static void program_timing_sync(
    struct dc *dc,
    struct dc_state *ctx)
{
    int i, j;
    int i, j, k;
    int group_index = 0;
    int num_group = 0;
    int pipe_count = dc->res_pool->pipe_count;
    struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

@@ -907,11 +907,11 @@ static void program_timing_sync(
        }
    }

    /* set first unblanked pipe as master */
    /* set first pipe with plane as master */
    for (j = 0; j < group_size; j++) {
        struct pipe_ctx *temp;

        if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
        if (pipe_set[j]->plane_state) {
            if (j == 0)
                break;

@@ -922,9 +922,21 @@ static void program_timing_sync(
        }
    }

    /* remove any other unblanked pipes as they have already been synced */

    for (k = 0; k < group_size; k++) {
        struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

        status->timing_sync_info.group_id = num_group;
        status->timing_sync_info.group_size = group_size;
        if (k == 0)
            status->timing_sync_info.master = true;
        else
            status->timing_sync_info.master = false;

    }
    /* remove any other pipes with plane as they have already been synced */
    for (j = j + 1; j < group_size; j++) {
        if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
        if (pipe_set[j]->plane_state) {
            group_size--;
            pipe_set[j] = pipe_set[group_size];
            j--;

@@ -936,6 +948,7 @@ static void program_timing_sync(
            dc, group_index, group_size, pipe_set);
        group_index++;
    }
    num_group++;
    }
}

@@ -956,6 +969,52 @@ static bool context_changed(
    return false;
}

bool dc_validate_seamless_boot_timing(struct dc *dc,
                                      const struct dc_sink *sink,
                                      struct dc_crtc_timing *crtc_timing)
{
    struct timing_generator *tg;
    struct dc_link *link = sink->link;
    unsigned int inst;

    /* Check for enabled DIG to identify enabled display */
    if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
        return false;

    /* Check for which front end is used by this encoder.
     * Note the inst is 1 indexed, where 0 is undefined.
     * Note that DIG_FE can source from different OTG but our
     * current implementation always map 1-to-1, so this code makes
     * the same assumption and doesn't check OTG source.
     */
    inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;

    /* Instance should be within the range of the pool */
    if (inst >= dc->res_pool->pipe_count)
        return false;

    tg = dc->res_pool->timing_generators[inst];

    if (!tg->funcs->is_matching_timing)
        return false;

    if (!tg->funcs->is_matching_timing(tg, crtc_timing))
        return false;

    if (dc_is_dp_signal(link->connector_signal)) {
        unsigned int pix_clk_100hz;

        dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
            dc->res_pool->dp_clock_source,
            inst, &pix_clk_100hz);

        if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
            return false;
    }

    return true;
}

bool dc_enable_stereo(
    struct dc *dc,
    struct dc_state *context,

@@ -1037,6 +1096,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        const struct dc_link *link = context->streams[i]->link;
        struct dc_stream_status *status;

        if (context->streams[i]->apply_seamless_boot_optimization)
            context->streams[i]->apply_seamless_boot_optimization = false;

        if (!context->streams[i]->mode_changed)
            continue;

@@ -1112,6 +1174,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
    int i;
    struct dc_state *context = dc->current_state;

    if (dc->optimized_required == false)
        return true;

    post_surface_trace(dc);

    for (i = 0; i < dc->res_pool->pipe_count; i++)

@@ -1440,6 +1505,101 @@ static struct dc_stream_status *stream_get_status(

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

static void copy_surface_update_to_plane(
    struct dc_plane_state *surface,
    struct dc_surface_update *srf_update)
{
    if (srf_update->flip_addr) {
        surface->address = srf_update->flip_addr->address;
        surface->flip_immediate =
            srf_update->flip_addr->flip_immediate;
        surface->time.time_elapsed_in_us[surface->time.index] =
            srf_update->flip_addr->flip_timestamp_in_us -
            surface->time.prev_update_time_in_us;
        surface->time.prev_update_time_in_us =
            srf_update->flip_addr->flip_timestamp_in_us;
        surface->time.index++;
        if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
            surface->time.index = 0;
    }

    if (srf_update->scaling_info) {
        surface->scaling_quality =
            srf_update->scaling_info->scaling_quality;
        surface->dst_rect =
            srf_update->scaling_info->dst_rect;
        surface->src_rect =
            srf_update->scaling_info->src_rect;
        surface->clip_rect =
            srf_update->scaling_info->clip_rect;
    }

    if (srf_update->plane_info) {
        surface->color_space =
            srf_update->plane_info->color_space;
        surface->format =
            srf_update->plane_info->format;
        surface->plane_size =
            srf_update->plane_info->plane_size;
        surface->rotation =
            srf_update->plane_info->rotation;
        surface->horizontal_mirror =
            srf_update->plane_info->horizontal_mirror;
        surface->stereo_format =
            srf_update->plane_info->stereo_format;
        surface->tiling_info =
            srf_update->plane_info->tiling_info;
        surface->visible =
            srf_update->plane_info->visible;
        surface->per_pixel_alpha =
            srf_update->plane_info->per_pixel_alpha;
        surface->global_alpha =
            srf_update->plane_info->global_alpha;
        surface->global_alpha_value =
            srf_update->plane_info->global_alpha_value;
        surface->dcc =
            srf_update->plane_info->dcc;
        surface->sdr_white_level =
            srf_update->plane_info->sdr_white_level;
    }

    if (srf_update->gamma &&
        (surface->gamma_correction !=
         srf_update->gamma)) {
        memcpy(&surface->gamma_correction->entries,
               &srf_update->gamma->entries,
               sizeof(struct dc_gamma_entries));
        surface->gamma_correction->is_identity =
            srf_update->gamma->is_identity;
        surface->gamma_correction->num_entries =
            srf_update->gamma->num_entries;
        surface->gamma_correction->type =
            srf_update->gamma->type;
    }

    if (srf_update->in_transfer_func &&
        (surface->in_transfer_func !=
         srf_update->in_transfer_func)) {
        surface->in_transfer_func->sdr_ref_white_level =
            srf_update->in_transfer_func->sdr_ref_white_level;
        surface->in_transfer_func->tf =
            srf_update->in_transfer_func->tf;
        surface->in_transfer_func->type =
            srf_update->in_transfer_func->type;
        memcpy(&surface->in_transfer_func->tf_pts,
               &srf_update->in_transfer_func->tf_pts,
               sizeof(struct dc_transfer_func_distributed_points));
    }

    if (srf_update->input_csc_color_matrix)
        surface->input_csc_color_matrix =
            *srf_update->input_csc_color_matrix;

    if (srf_update->coeff_reduction_factor)
        surface->coeff_reduction_factor =
            *srf_update->coeff_reduction_factor;
}

static void commit_planes_do_stream_update(struct dc *dc,
    struct dc_stream_state *stream,
    struct dc_stream_update *stream_update,

@@ -1463,13 +1623,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
                stream_update->adjust->v_total_min,
                stream_update->adjust->v_total_max);

            if (stream_update->vline0_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
            if (stream_update->periodic_vsync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
                pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
                    pipe_ctx->stream_res.tg, VLINE0, stream->vline0_config);
                    pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE0, &stream->periodic_vsync_config);

            if (stream_update->vline1_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
            if (stream_update->enhanced_sync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
                pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
                    pipe_ctx->stream_res.tg, VLINE1, stream->vline1_config);
                    pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE1, &stream->enhanced_sync_config);

            if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
                stream_update->vrr_infopacket ||

@@ -1645,14 +1805,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
    for (i = 0; i < surface_count; i++) {
        struct dc_plane_state *surface = srf_updates[i].surface;

        /* TODO: On flip we don't build the state, so it still has the
         * old address. Which is why we are updating the address here
         */
        if (srf_updates[i].flip_addr) {
            surface->address = srf_updates[i].flip_addr->address;
            surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;

        }
        copy_surface_update_to_plane(surface, &srf_updates[i]);

        if (update_type >= UPDATE_TYPE_MED) {
            for (j = 0; j < dc->res_pool->pipe_count; j++) {
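A hypothetical caller of the new dc_validate_seamless_boot_timing() above, during boot-time stream construction (the surrounding names are assumed, not taken from this diff):

    /* Sketch: leave the firmware-lit display untouched only when the
     * hardware timing still matches what we are about to program. */
    if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
        stream->apply_seamless_boot_optimization = true;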
@@ -76,6 +76,12 @@ static void destruct(struct dc_link *link)
{
    int i;

    if (link->hpd_gpio != NULL) {
        dal_gpio_close(link->hpd_gpio);
        dal_gpio_destroy_irq(&link->hpd_gpio);
        link->hpd_gpio = NULL;
    }

    if (link->ddc)
        dal_ddc_service_destroy(&link->ddc);

@@ -931,18 +937,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)

bool dc_link_get_hpd_state(struct dc_link *dc_link)
{
    struct gpio *hpd_pin;
    uint32_t state;

    hpd_pin = get_hpd_gpio(dc_link->ctx->dc_bios,
                           dc_link->link_id, dc_link->ctx->gpio_service);
    if (hpd_pin == NULL)
        ASSERT(false);

    dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
    dal_gpio_get_value(hpd_pin, &state);
    dal_gpio_close(hpd_pin);
    dal_gpio_destroy_irq(&hpd_pin);
    dal_gpio_lock_pin(dc_link->hpd_gpio);
    dal_gpio_get_value(dc_link->hpd_gpio, &state);
    dal_gpio_unlock_pin(dc_link->hpd_gpio);

    return state;
}

@@ -1098,7 +1097,6 @@ static bool construct(
    const struct link_init_data *init_params)
{
    uint8_t i;
    struct gpio *hpd_gpio = NULL;
    struct ddc_service_init_data ddc_service_init_data = { { 0 } };
    struct dc_context *dc_ctx = init_params->ctx;
    struct encoder_init_data enc_init_data = { 0 };

@@ -1128,10 +1126,12 @@ static bool construct(
    if (link->dc->res_pool->funcs->link_init)
        link->dc->res_pool->funcs->link_init(link);

    hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);

    if (hpd_gpio != NULL)
        link->irq_source_hpd = dal_irq_get_source(hpd_gpio);
    link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
    if (link->hpd_gpio != NULL) {
        dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
        dal_gpio_unlock_pin(link->hpd_gpio);
        link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
    }

    switch (link->link_id.id) {
    case CONNECTOR_ID_HDMI_TYPE_A:

@@ -1149,18 +1149,18 @@ static bool construct(
    case CONNECTOR_ID_DISPLAY_PORT:
        link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;

        if (hpd_gpio != NULL)
        if (link->hpd_gpio != NULL)
            link->irq_source_hpd_rx =
                dal_irq_get_rx_source(hpd_gpio);
                dal_irq_get_rx_source(link->hpd_gpio);

        break;
    case CONNECTOR_ID_EDP:
        link->connector_signal = SIGNAL_TYPE_EDP;

        if (hpd_gpio != NULL) {
        if (link->hpd_gpio != NULL) {
            link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
            link->irq_source_hpd_rx =
                dal_irq_get_rx_source(hpd_gpio);
                dal_irq_get_rx_source(link->hpd_gpio);
        }
        break;
    case CONNECTOR_ID_LVDS:

@@ -1171,10 +1171,7 @@ static bool construct(
        goto create_fail;
    }

    if (hpd_gpio != NULL) {
        dal_gpio_destroy_irq(&hpd_gpio);
        hpd_gpio = NULL;
    }

    /* TODO: #DAL3 Implement id to str function.*/
    LINK_INFO("Connector[%d] description:"

@@ -1277,8 +1274,9 @@ link_enc_create_fail:
ddc_create_fail:
create_fail:

    if (hpd_gpio != NULL) {
        dal_gpio_destroy_irq(&hpd_gpio);
    if (link->hpd_gpio != NULL) {
        dal_gpio_destroy_irq(&link->hpd_gpio);
        link->hpd_gpio = NULL;
    }

    return false;

@@ -2582,13 +2580,23 @@ void core_link_enable_stream(
            &stream->timing);

    if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
        bool apply_edp_fast_boot_optimization =
            pipe_ctx->stream->apply_edp_fast_boot_optimization;

        pipe_ctx->stream->apply_edp_fast_boot_optimization = false;

        resource_build_info_frame(pipe_ctx);
        core_dc->hwss.update_info_frame(pipe_ctx);

        /* Do not touch link on seamless boot optimization. */
        if (pipe_ctx->stream->apply_seamless_boot_optimization) {
            pipe_ctx->stream->dpms_off = false;
            return;
        }

        /* eDP lit up by bios already, no need to enable again. */
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
            pipe_ctx->stream->apply_edp_fast_boot_optimization) {
            pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
            apply_edp_fast_boot_optimization) {
            pipe_ctx->stream->dpms_off = false;
            return;
        }

@@ -2615,6 +2623,8 @@ void core_link_enable_stream(
        }
    }

    stream->link->link_status.link_active = true;

    core_dc->hwss.enable_audio_stream(pipe_ctx);

    /* turn off otg test pattern if enable */

@@ -2649,6 +2659,8 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
    core_dc->hwss.disable_stream(pipe_ctx, option);

    disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);

    pipe_ctx->stream->link->link_status.link_active = false;
}

void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -47,7 +47,9 @@ static void wait_for_training_aux_rd_interval(
    struct dc_link *link,
    uint32_t default_wait_in_micro_secs)
{
    union training_aux_rd_interval training_rd_interval = {0};
    union training_aux_rd_interval training_rd_interval;

    memset(&training_rd_interval, 0, sizeof(training_rd_interval));

    /* overwrite the delay if rev > 1.1*/
    if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {

@@ -2538,7 +2540,6 @@ void detect_edp_sink_caps(struct dc_link *link)
    uint32_t entry;
    uint32_t link_rate_in_khz;
    enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
    uint8_t link_rate_set = 0;

    retrieve_link_cap(link);

@@ -2558,39 +2559,7 @@ void detect_edp_sink_caps(struct dc_link *link)
            link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
            if (link->reported_link_cap.link_rate < link_rate) {
                link->reported_link_cap.link_rate = link_rate;

                switch (link_rate) {
                case LINK_RATE_LOW:
                    link_rate_set = 1;
                    break;
                case LINK_RATE_RATE_2:
                    link_rate_set = 2;
                    break;
                case LINK_RATE_RATE_3:
                    link_rate_set = 3;
                    break;
                case LINK_RATE_HIGH:
                    link_rate_set = 4;
                    break;
                case LINK_RATE_RBR2:
                    link_rate_set = 5;
                    break;
                case LINK_RATE_RATE_6:
                    link_rate_set = 6;
                    break;
                case LINK_RATE_HIGH2:
                    link_rate_set = 7;
                    break;
                case LINK_RATE_HIGH3:
                    link_rate_set = 8;
                    break;
                default:
                    link_rate_set = 0;
                    break;
                }

                if (link->dpcd_caps.link_rate_set < link_rate_set)
                    link->dpcd_caps.link_rate_set = link_rate_set;
                link->dpcd_caps.link_rate_set = entry;
            }
        }
    }
@@ -119,6 +119,10 @@ bool edp_receiver_ready_T9(struct dc_link *link)
            break;
        udelay(100); //MAx T9
    } while (++tries < 50);

    if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
        udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);

    return result;
}
bool edp_receiver_ready_T7(struct dc_link *link)
@@ -1800,6 +1800,51 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream)
        stream->phy_pix_clk *= 2;
}

static int acquire_resource_from_hw_enabled_state(
    struct resource_context *res_ctx,
    const struct resource_pool *pool,
    struct dc_stream_state *stream)
{
    struct dc_link *link = stream->link;
    unsigned int inst;

    /* Check for enabled DIG to identify enabled display */
    if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
        return -1;

    /* Check for which front end is used by this encoder.
     * Note the inst is 1 indexed, where 0 is undefined.
     * Note that DIG_FE can source from different OTG but our
     * current implementation always map 1-to-1, so this code makes
     * the same assumption and doesn't check OTG source.
     */
    inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;

    /* Instance should be within the range of the pool */
    if (inst >= pool->pipe_count)
        return -1;

    if (!res_ctx->pipe_ctx[inst].stream) {
        struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[inst];

        pipe_ctx->stream_res.tg = pool->timing_generators[inst];
        pipe_ctx->plane_res.mi = pool->mis[inst];
        pipe_ctx->plane_res.hubp = pool->hubps[inst];
        pipe_ctx->plane_res.ipp = pool->ipps[inst];
        pipe_ctx->plane_res.xfm = pool->transforms[inst];
        pipe_ctx->plane_res.dpp = pool->dpps[inst];
        pipe_ctx->stream_res.opp = pool->opps[inst];
        if (pool->dpps[inst])
            pipe_ctx->plane_res.mpcc_inst = pool->dpps[inst]->inst;
        pipe_ctx->pipe_idx = inst;

        pipe_ctx->stream = stream;
        return inst;
    }

    return -1;
}

enum dc_status resource_map_pool_resources(
    const struct dc *dc,
    struct dc_state *context,

@@ -1824,8 +1869,15 @@ enum dc_status resource_map_pool_resources(

    calculate_phy_pix_clks(stream);

    /* acquire new resources */
    pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
    if (stream->apply_seamless_boot_optimization)
        pipe_idx = acquire_resource_from_hw_enabled_state(
            &context->res_ctx,
            pool,
            stream);

    if (pipe_idx < 0)
        /* acquire new resources */
        pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
    if (pipe_idx < 0)
@@ -112,16 +112,12 @@ uint8_t get_vmid_for_ptb(struct vm_helper *vm_helper, int64_t ptb, uint8_t hubp_
    return vmid;
}

struct vm_helper init_vm_helper(unsigned int num_vmid, unsigned int num_hubp)
void init_vm_helper(struct vm_helper *vm_helper, unsigned int num_vmid, unsigned int num_hubp)
{
    static uint64_t ptb_assigned_to_vmid[MAX_VMID];
    static struct vmid_usage hubp_vmid_usage[MAX_HUBP];
    vm_helper->num_vmid = num_vmid;
    vm_helper->num_hubp = num_hubp;
    vm_helper->num_vmids_available = num_vmid - 1;

    return (struct vm_helper){
        .num_vmid = num_vmid,
        .num_hubp = num_hubp,
        .num_vmids_available = num_vmid - 1,
        .ptb_assigned_to_vmid = ptb_assigned_to_vmid,
        .hubp_vmid_usage = hubp_vmid_usage
    };
    memset(vm_helper->hubp_vmid_usage, 0, sizeof(vm_helper->hubp_vmid_usage[0]) * MAX_HUBP);
    memset(vm_helper->ptb_assigned_to_vmid, 0, sizeof(vm_helper->ptb_assigned_to_vmid[0]) * MAX_VMID);
}
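The signature change above moves the helper's backing arrays out of function-local static storage into caller-provided memory, presumably so that multiple DC instances stop sharing one set of VMID bookkeeping. A hypothetical caller under that assumption:

    struct vm_helper vm_helper;     /* hypothetically owned per DC instance */

    init_vm_helper(&vm_helper, num_vmid, num_hubp);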
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"

#define DC_VER "3.2.15"
#define DC_VER "3.2.17"

#define MAX_SURFACES 3
#define MAX_STREAMS 6

@@ -255,6 +255,7 @@ struct dc_debug_options {
    bool scl_reset_length10;
    bool hdmi20_disable;
    bool skip_detection_link_training;
    unsigned int force_odm_combine; //bit vector based on otg inst
    unsigned int force_fclk_khz;
};

@@ -264,7 +265,6 @@ struct dc_debug_data {
    uint32_t auxErrorCount;
};

struct dc_state;
struct resource_pool;
struct dce_hwseq;

@@ -594,6 +594,10 @@ struct dc_validation_set {
    uint8_t plane_count;
};

bool dc_validate_seamless_boot_timing(struct dc *dc,
                                      const struct dc_sink *sink,
                                      struct dc_crtc_timing *crtc_timing);

enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
@@ -30,6 +30,7 @@
#include "grph_object_defs.h"

struct dc_link_status {
    bool link_active;
    struct dpcd_caps *dpcd_caps;
};

@@ -125,6 +126,7 @@ struct dc_link {
    struct dc_link_status link_status;

    struct link_trace link_trace;
    struct gpio *hpd_gpio;
};

const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
@@ -32,11 +32,17 @@
/*******************************************************************************
 * Stream Interfaces
 ******************************************************************************/
struct timing_sync_info {
	int group_id;
	int group_size;
	bool master;
};

struct dc_stream_status {
	int primary_otg_inst;
	int stream_enc_inst;
	int plane_count;
	struct timing_sync_info timing_sync_info;
	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
};

@@ -45,11 +51,12 @@ struct freesync_context {
	bool dummy;
};

struct vline_config {
	unsigned int start_line;
	unsigned int end_line;
union vline_config {
	unsigned int line_number;
	unsigned long long delta_in_ns;
};

struct dc_stream_state {
	// sink is deprecated, new code should not reference
	// this pointer

@@ -99,8 +106,8 @@ struct dc_stream_state {
	/* DMCU info */
	unsigned int abm_level;

	struct vline_config vline0_config;
	struct vline_config vline1_config;
	union vline_config periodic_vsync_config;
	union vline_config enhanced_sync_config;

	/* from core_stream struct */
	struct dc_context *ctx;

@@ -112,7 +119,6 @@ struct dc_stream_state {
	int phy_pix_clk;
	enum signal_type signal;
	bool dpms_off;
	bool apply_edp_fast_boot_optimization;

	void *dm_stream_context;

@@ -139,6 +145,9 @@ struct dc_stream_state {
		uint8_t otg_offset;
	} out;

	bool apply_edp_fast_boot_optimization;
	bool apply_seamless_boot_optimization;

	uint32_t stream_id;
};

@@ -149,8 +158,8 @@ struct dc_stream_update {
	struct dc_info_packet *hdr_static_metadata;
	unsigned int *abm_level;

	struct vline_config *vline0_config;
	struct vline_config *vline1_config;
	union vline_config *periodic_vsync_config;
	union vline_config *enhanced_sync_config;

	struct dc_crtc_timing_adjust *adjust;
	struct dc_info_packet *vrr_infopacket;

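Editor's note: the union replaces the old start/end line pair because the two vline interrupts are now programmed from different inputs, VLINE0 from a time delta before vsync (delta_in_ns) and VLINE1 from an absolute scanline (line_number), and only one interpretation is live at a time. A small hedged sketch of the two uses (field names from the diff, values invented):

#include <stdio.h>

/* Mirrors the new union: one field per programming model. */
union vline_config {
	unsigned int line_number;       /* VLINE1: absolute scanline */
	unsigned long long delta_in_ns; /* VLINE0: time before vsync */
};

int main(void)
{
	union vline_config periodic_vsync_config;
	union vline_config enhanced_sync_config;

	/* VLINE0-style use: request an interrupt 500 us before vsync. */
	periodic_vsync_config.delta_in_ns = 500000ull;

	/* VLINE1-style use: fire exactly at scanline 1080. */
	enhanced_sync_config.line_number = 1080;

	printf("delta=%lluns line=%u\n",
	       periodic_vsync_config.delta_in_ns,
	       enhanced_sync_config.line_number);
	return 0;
}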
@@ -201,6 +201,7 @@ union display_content_support {
struct dc_panel_patch {
	unsigned int dppowerup_delay;
	unsigned int extra_t12_ms;
	unsigned int extra_delay_backlight_off;
};

struct dc_edid_caps {

@@ -314,8 +314,8 @@ static bool dce_abm_immediate_disable(struct abm *abm)

	/* setDMCUParam_ABMLevel */
	REG_UPDATE_2(MASTER_COMM_CMD_REG,
			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
			MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY);
			MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
			MASTER_COMM_CMD_REG_BYTE1, MCP_DISABLE_ABM_IMMEDIATELY);

	/* notifyDMCUMsg */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);

@@ -516,7 +516,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
			}
		}

		msleep(1);
		udelay(1000);
	}
	return false;
}

@@ -977,6 +977,28 @@ static bool dce110_clock_source_power_down(
	return bp_result == BP_RESULT_OK;
}

static bool get_pixel_clk_frequency_100hz(
		struct clock_source *clock_source,
		unsigned int inst,
		unsigned int *pixel_clk_khz)
{
	struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
	unsigned int clock_hz = 0;

	if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
		clock_hz = REG_READ(PHASE[inst]);

		/* NOTE: There is agreement with VBIOS here that MODULO is
		 * programmed equal to DPREFCLK, in which case PHASE will be
		 * equivalent to pixel clock.
		 */
		*pixel_clk_khz = clock_hz / 100;
		return true;
	}

	return false;
}

/*****************************************/
/* Constructor                           */
/*****************************************/

@@ -984,12 +1006,14 @@ static bool dce110_clock_source_power_down(
static const struct clock_source_funcs dce112_clk_src_funcs = {
	.cs_power_down = dce110_clock_source_power_down,
	.program_pix_clk = dce112_program_pix_clk,
	.get_pix_clk_dividers = dce112_get_pix_clk_dividers
	.get_pix_clk_dividers = dce112_get_pix_clk_dividers,
	.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
static const struct clock_source_funcs dce110_clk_src_funcs = {
	.cs_power_down = dce110_clock_source_power_down,
	.program_pix_clk = dce110_program_pix_clk,
	.get_pix_clk_dividers = dce110_get_pix_clk_dividers
	.get_pix_clk_dividers = dce110_get_pix_clk_dividers,
	.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};

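Editor's note: per the comment in the new callback, when MODULO is programmed equal to DPREFCLK the PHASE register holds the pixel clock in Hz, so dividing by 100 yields units of 100 Hz, which matches the _100hz interface name even though the out-parameter keeps its historical pixel_clk_khz spelling. A hedged arithmetic sketch with an invented register value:

#include <stdio.h>

/* Sketch of the readback conversion only; the register access itself is
 * hardware-specific and omitted here. */
static unsigned int phase_to_100hz(unsigned int phase_hz)
{
	return phase_hz / 100;
}

int main(void)
{
	unsigned int phase = 148500000;	/* hypothetical 148.5 MHz pixel clock */

	/* 148500000 Hz / 100 = 1485000, i.e. 148.5 MHz in 100 Hz units */
	printf("pix_clk = %u x 100Hz\n", phase_to_100hz(phase));
	return 0;
}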
@@ -1032,8 +1032,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
	struct dc_link *link = stream->link;

	/* only 3 items below are used by unblank */
	params.pixel_clk_khz =
		pipe_ctx->stream->timing.pix_clk_100hz / 10;
	params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal))

@@ -1043,6 +1042,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
			link->dc->hwss.edp_backlight_control(link, true);
	}
}

void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;

@@ -1250,8 +1250,6 @@ static enum dc_status dce110_enable_stream_timing(
	struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
			pipe_ctx[pipe_ctx->pipe_idx];
	struct tg_color black_color = {0};
	struct drr_params params = {0};
	unsigned int event_triggers = 0;

	if (!pipe_ctx_old->stream) {

@@ -1280,20 +1278,6 @@ static enum dc_status dce110_enable_stream_timing(
				pipe_ctx->stream_res.tg,
				&stream->timing,
				true);

		params.vertical_total_min = stream->adjust.v_total_min;
		params.vertical_total_max = stream->adjust.v_total_max;
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
				pipe_ctx->stream_res.tg, &params);

		// DRR should set trigger event to monitor surface update event
		if (stream->adjust.v_total_min != 0 &&
				stream->adjust.v_total_max != 0)
			event_triggers = 0x80;
		if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
			pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers);
	}

	if (!pipe_ctx_old->stream) {

@@ -1313,6 +1297,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;

	if (pipe_ctx->stream_res.audio != NULL) {
		struct audio_output audio_output;

@@ -1339,7 +1325,27 @@ static enum dc_status apply_single_controller_ctx_to_hw(
	}

	/*  */
	dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
	/* Do not touch stream timing on seamless boot optimization. */
	if (!pipe_ctx->stream->apply_seamless_boot_optimization)
		dc->hwss.enable_stream_timing(pipe_ctx, context, dc);

	if (pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt)
		pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt(
				pipe_ctx->stream_res.tg,
				&stream->timing);

	params.vertical_total_min = stream->adjust.v_total_min;
	params.vertical_total_max = stream->adjust.v_total_max;
	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
			pipe_ctx->stream_res.tg, &params);

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		event_triggers = 0x80;
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
			pipe_ctx->stream_res.tg, event_triggers);

	if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
		pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(

@@ -2269,6 +2275,11 @@ static void dce110_enable_per_frame_crtc_position_reset(

}

static void init_pipes(struct dc *dc, struct dc_state *context)
{
	// Do nothing
}

static void init_hw(struct dc *dc)
{
	int i;

@@ -2535,7 +2546,7 @@ static void dce110_apply_ctx_for_surface(
	}

	if (dc->fbc_compressor)
		enable_fbc(dc, dc->current_state);
		enable_fbc(dc, context);
}

static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)

@@ -2636,6 +2647,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
	.program_gamut_remap = program_gamut_remap,
	.program_output_csc = program_output_csc,
	.init_hw = init_hw,
	.init_pipes = init_pipes,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
	.update_plane_addr = update_plane_addr,

@@ -88,11 +88,18 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
	s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
}

void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub)
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
	REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0);

	/*
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow);
}

bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)

@@ -262,8 +269,6 @@ void hubbub1_program_watermarks(
	bool safe_to_lower)
{
	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);

	uint32_t force_en = hubbub1->base.ctx->dc->debug.disable_stutter ? 1 : 0;
	/*
	 * Need to clamp to max of the register values (i.e. no wrap)
	 * for dcn1, all wm registers are 21-bit wide

@@ -537,9 +542,7 @@ void hubbub1_program_watermarks(
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);

	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

#if 0
	REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,

@@ -204,7 +204,7 @@ void hubbub1_program_watermarks(
		unsigned int refclk_mhz,
		bool safe_to_lower);

void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub);
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow);

bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub);

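Editor's note: the rename turns a one-way "disable" into a boolean control, and the sense is inverted at the register, allow = true clears FORCE_ENABLE so the hardware may enter self refresh (stutter). A hedged sketch of that mapping; the bitfield struct below is purely illustrative and is not the real register layout:

#include <stdbool.h>
#include <stdio.h>

/* FORCE_ENABLE = 1 forbids stutter, FORCE_ENABLE = 0 allows it,
 * so the field is programmed with !allow and FORCE_VALUE stays 0. */
struct dram_state_cntl {
	unsigned force_value : 1;
	unsigned force_enable : 1;
};

static void allow_self_refresh_control(struct dram_state_cntl *reg, bool allow)
{
	reg->force_value = 0;
	reg->force_enable = !allow;
}

int main(void)
{
	struct dram_state_cntl reg = {0};

	allow_self_refresh_control(&reg, false); /* e.g. HW frame-packed 3D */
	printf("force_enable=%u (stutter forbidden)\n", reg.force_enable);

	allow_self_refresh_control(&reg, true);
	printf("force_enable=%u (stutter allowed)\n", reg.force_enable);
	return 0;
}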
@@ -1150,9 +1150,28 @@ void hubp1_cursor_set_position(
	REG_UPDATE(CURSOR_CONTROL,
			CURSOR_ENABLE, cur_en);

	REG_SET_2(CURSOR_POSITION, 0,
			CURSOR_X_POSITION, pos->x,
	//account for cases where we see negative offset relative to overlay plane
	if (src_x_offset < 0 && src_y_offset < 0) {
		REG_SET_2(CURSOR_POSITION, 0,
			CURSOR_X_POSITION, 0,
			CURSOR_Y_POSITION, 0);
		x_hotspot -= src_x_offset;
		y_hotspot -= src_y_offset;
	} else if (src_x_offset < 0) {
		REG_SET_2(CURSOR_POSITION, 0,
			CURSOR_X_POSITION, 0,
			CURSOR_Y_POSITION, pos->y);
		x_hotspot -= src_x_offset;
	} else if (src_y_offset < 0) {
		REG_SET_2(CURSOR_POSITION, 0,
			CURSOR_X_POSITION, pos->x,
			CURSOR_Y_POSITION, 0);
		y_hotspot -= src_y_offset;
	} else {
		REG_SET_2(CURSOR_POSITION, 0,
			CURSOR_X_POSITION, pos->x,
			CURSOR_Y_POSITION, pos->y);
	}

	REG_SET_2(CURSOR_HOT_SPOT, 0,
			CURSOR_HOT_SPOT_X, x_hotspot,

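Editor's note: when the cursor's source offset relative to the overlay plane goes negative, the position register cannot hold a negative value, so the fix clamps the programmed position to 0 and folds the negative offset into the hotspot; position minus hotspot stays constant, so the visible cursor does not jump. A hedged worked example of that compensation (numbers invented):

#include <stdio.h>

int main(void)
{
	int pos_x = 40, x_hotspot = 8;
	int src_x_offset = -12;	/* cursor 12px past the plane's left edge */
	int prog_x = pos_x;

	if (src_x_offset < 0) {
		prog_x = 0;			/* clamped X position */
		x_hotspot -= src_x_offset;	/* 8 - (-12) = 20 */
	}

	/* prog_x - x_hotspot shifts by the same 12px the clamp removed. */
	printf("programmed x=%d hotspot=%d\n", prog_x, x_hotspot);
	return 0;
}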
@@ -636,8 +636,6 @@ static enum dc_status dcn10_enable_stream_timing(
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct drr_params params = {0};
	unsigned int event_triggers = 0;

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end

@@ -705,19 +703,6 @@ static enum dc_status dcn10_enable_stream_timing(
		return DC_ERROR_UNEXPECTED;
	}

	params.vertical_total_min = stream->adjust.v_total_min;
	params.vertical_total_max = stream->adjust.v_total_max;
	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
			pipe_ctx->stream_res.tg, &params);

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		event_triggers = 0x80;
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
			pipe_ctx->stream_res.tg, event_triggers);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */

@@ -971,92 +956,34 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
			pipe_ctx->pipe_idx);
}

static void dcn10_init_hw(struct dc *dc)
static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct dc_state *context = dc->current_state;

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		enable_power_gating_plane(dc->hwseq, true);
	} else {

		if (!dcb->funcs->is_accelerated_mode(dcb)) {
			bool allow_self_fresh_force_enable =
				hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub);

			bios_golden_init(dc);

			/* WA for making DF sleep when idle after resume from S0i3.
			 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
			 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
			 * before calling command table and it changed to 1 after,
			 * it should be set back to 0.
			 */
			if (allow_self_fresh_force_enable == false &&
					hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
				hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub);

			disable_vga(dc->hwseq);
		}

		for (i = 0; i < dc->link_count; i++) {
			/* Power up AND update implementation according to the
			 * required signal (which may be different from the
			 * default signal on connector).
			 */
			struct dc_link *link = dc->links[i];

			if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
				dc->hwss.edp_power_control(link, true);

			link->link_enc->funcs->hw_init(link->link_enc);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->lock(tg);
	}

	/* Blank controller using driver code instead of
	 * command table.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			tg->funcs->set_blank(tg, true);
			hwss_wait_for_blank_complete(tg);
		}
	}

	/* Reset all MPCC muxes */
	dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);

	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

@@ -1074,18 +1001,9 @@ static void dcn10_init_hw(struct dc *dc)
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hwss1_plane_atomic_disconnect(dc, pipe_ctx);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		dcn10_disable_plane(dc, pipe_ctx);

@@ -1094,10 +1012,73 @@ static void dcn10_init_hw(struct dc *dc)

		tg->funcs->tg_init(tg);
	}
}

	/* end of FPGA. Below if real ASIC */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
static void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		enable_power_gating_plane(dc->hwseq, true);

		/* end of FPGA. Below if real ASIC */
		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		bool allow_self_fresh_force_enable =
				hububu1_is_allow_self_refresh_enabled(
						dc->res_pool->hubbub);

		bios_golden_init(dc);

		/* WA for making DF sleep when idle after resume from S0i3.
		 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
		 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
		 * before calling command table and it changed to 1 after,
		 * it should be set back to 0.
		 */
		if (allow_self_fresh_force_enable == false &&
				hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, true);

		disable_vga(dc->hwseq);
	}

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
			dc->hwss.edp_power_control(link, true);

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	for (i = 0; i < dc->res_pool->audio_count; i++) {
		struct audio *audio = dc->res_pool->audios[i];

@@ -1128,6 +1109,9 @@ static void dcn10_init_hw(struct dc *dc)
	enable_power_gating_plane(dc->hwseq, true);

	memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));

	if (dc->hwss.init_pipes)
		dc->hwss.init_pipes(dc, dc->current_state);
}

static void reset_hw_ctx_wrap(

@@ -2334,9 +2318,10 @@ static void dcn10_apply_ctx_for_surface(
		}
	}

	if (!pipe_ctx->plane_state &&
			old_pipe_ctx->plane_state &&
			old_pipe_ctx->stream_res.tg == tg) {
	if ((!pipe_ctx->plane_state ||
			pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
			old_pipe_ctx->plane_state &&
			old_pipe_ctx->stream_res.tg == tg) {

		dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
		removed_pipe[i] = true;

@@ -2383,6 +2368,22 @@ static void dcn10_apply_ctx_for_surface(
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}

static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
{
	uint8_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->timing.timing_3d_format
				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
			/*
			 * Disable stutter
			 */
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
			break;
		}
	}
}

static void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)

@@ -2404,6 +2405,7 @@ static void dcn10_prepare_bandwidth(
			&context->bw.dcn.watermarks,
			dc->res_pool->ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

@@ -2433,6 +2435,7 @@ static void dcn10_optimize_bandwidth(
			&context->bw.dcn.watermarks,
			dc->res_pool->ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

@@ -2709,6 +2712,7 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
static const struct hw_sequencer_funcs dcn10_funcs = {
	.program_gamut_remap = program_gamut_remap,
	.init_hw = dcn10_init_hw,
	.init_pipes = dcn10_init_pipes,
	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
	.update_plane_addr = dcn10_update_plane_addr,

@@ -85,6 +85,7 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
	.enable_hpd = dcn10_link_encoder_enable_hpd,
	.disable_hpd = dcn10_link_encoder_disable_hpd,
	.is_dig_enabled = dcn10_is_dig_enabled,
	.get_dig_frontend = dcn10_get_dig_frontend,
	.destroy = dcn10_link_encoder_destroy
};

@@ -495,6 +496,15 @@ bool dcn10_is_dig_enabled(struct link_encoder *enc)
	return value;
}

unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
{
	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
	uint32_t value;

	REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
	return value;
}

static void link_encoder_disable(struct dcn10_link_encoder *enc10)
{
	/* reset training pattern */

@@ -336,6 +336,8 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,

bool dcn10_is_dig_enabled(struct link_encoder *enc);

unsigned int dcn10_get_dig_frontend(struct link_encoder *enc);

void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);

#endif /* __DC_LINK_ENCODER__DCN10_H__ */

@@ -92,28 +92,136 @@ static void optc1_disable_stereo(struct timing_generator *optc)
			OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}

static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
{
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;
	int vertical_line_start;

	patched_crtc_timing = *dc_crtc_timing;
	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);

	vesa_sync_start = patched_crtc_timing.h_addressable +
			patched_crtc_timing.h_border_right +
			patched_crtc_timing.h_front_porch;

	asic_blank_end = patched_crtc_timing.h_total -
			vesa_sync_start -
			patched_crtc_timing.h_border_left;

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
	if (vertical_line_start < 0) {
		ASSERT(0);
		vertical_line_start = 0;
	}

	return vertical_line_start;
}

static void calc_vline_position(
		struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing,
		unsigned long long vsync_delta,
		uint32_t *start_line,
		uint32_t *end_line)
{
	unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
	unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
	uint32_t req_delta_lines = (uint32_t) div64_u64(
			(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
			dc_crtc_timing->h_total);

	uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);

	if (req_delta_lines != 0)
		req_delta_lines--;

	if (req_delta_lines > vsync_line)
		*start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
	else
		*start_line = vsync_line - req_delta_lines;

	*end_line = *start_line + 2;

	if (*end_line >= dc_crtc_timing->v_total)
		*end_line = 2;
}

void optc1_program_vline_interrupt(
		struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing,
		enum vline_select vline,
		struct vline_config vline_config)
		const union vline_config *vline_config)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	uint32_t start_line = 0;
	uint32_t end_line = 0;

	switch (vline) {
	case VLINE0:
		calc_vline_position(optc, dc_crtc_timing, vline_config->delta_in_ns, &start_line, &end_line);
		REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
				OTG_VERTICAL_INTERRUPT0_LINE_START, vline_config.start_line,
				OTG_VERTICAL_INTERRUPT0_LINE_END, vline_config.end_line);
				OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
				OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
		break;
	case VLINE1:
		REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
				OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config.start_line);
				OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config->line_number);
		break;
	default:
		break;
	}
}

void optc1_program_vupdate_interrupt(
		struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);
	int32_t vertical_line_start;
	uint32_t asic_blank_end;
	uint32_t vesa_sync_start;
	struct dc_crtc_timing patched_crtc_timing;

	patched_crtc_timing = *dc_crtc_timing;
	optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);

	/* asic_h_blank_end = HsyncWidth + HbackPorch =
	 * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
	 * vesa.h_left_border
	 */
	vesa_sync_start = patched_crtc_timing.h_addressable +
			patched_crtc_timing.h_border_right +
			patched_crtc_timing.h_front_porch;

	asic_blank_end = patched_crtc_timing.h_total -
			vesa_sync_start -
			patched_crtc_timing.h_border_left;

	/* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
	 * program the reg for interrupt postition.
	 */
	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
	if (vertical_line_start < 0)
		vertical_line_start = 0;

	REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
			OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
}

/**
 * program_timing_generator used by mode timing set
 * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.

@@ -216,22 +324,14 @@ void optc1_program_timing(
			patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom);

	REG_UPDATE_2(OTG_V_BLANK_START_END,
			OTG_V_BLANK_START, asic_blank_start,
			OTG_V_BLANK_END, asic_blank_end);

	/* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
	 * program the reg for interrupt postition.
	 */
	vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
	v_fp2 = 0;
	if (vertical_line_start < 0)
		v_fp2 = -vertical_line_start;
	if (vertical_line_start < 0)
		vertical_line_start = 0;

	REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
			OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
	REG_UPDATE_2(OTG_V_BLANK_START_END,
			OTG_V_BLANK_START, asic_blank_start,
			OTG_V_BLANK_END, asic_blank_end);

	/* v_sync polarity */
	v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ?

@@ -290,7 +390,7 @@ void optc1_program_timing(

	h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
	REG_UPDATE(OTG_H_TIMING_CNTL,
			OTG_H_TIMING_DIV_BY2, h_div_2);
			OTG_H_TIMING_DIV_BY2, h_div_2 || optc1->comb_opp_id != 0xf);

}

@@ -1136,6 +1236,64 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
	return ret;
}

bool optc1_is_matching_timing(struct timing_generator *tg,
		const struct dc_crtc_timing *otg_timing)
{
	struct dc_crtc_timing hw_crtc_timing = {0};
	struct dcn_otg_state s = {0};

	if (tg == NULL || otg_timing == NULL)
		return false;

	optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

	hw_crtc_timing.h_total = s.h_total + 1;
	hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
	hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
	hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;

	hw_crtc_timing.v_total = s.v_total + 1;
	hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
	hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
	hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;

	if (otg_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (otg_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	return true;
}


void optc1_read_otg_state(struct optc *optc1,
		struct dcn_otg_state *s)
{

@@ -1323,6 +1481,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
	.validate_timing = optc1_validate_timing,
	.program_timing = optc1_program_timing,
	.program_vline_interrupt = optc1_program_vline_interrupt,
	.program_vupdate_interrupt = optc1_program_vupdate_interrupt,
	.program_global_sync = optc1_program_global_sync,
	.enable_crtc = optc1_enable_crtc,
	.disable_crtc = optc1_disable_crtc,

@@ -1332,6 +1491,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
	.get_frame_count = optc1_get_vblank_counter,
	.get_scanoutpos = optc1_get_crtc_scanoutpos,
	.get_otg_active_size = optc1_get_otg_active_size,
	.is_matching_timing = optc1_is_matching_timing,
	.set_early_control = optc1_set_early_control,
	/* used by enable_timing_synchronization. Not need for FPGA */
	.wait_for_state = optc1_wait_for_state,

@@ -1371,10 +1531,13 @@ void dcn10_timing_generator_init(struct optc *optc1)
	optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 8;
	optc1->min_v_sync_width = 1;
	optc1->comb_opp_id = 0xf;
}

bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
	return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
	bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;

	return two_pix;
}

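Editor's note: calc_vline_position converts a requested time-before-vsync into scanlines by rounding the nanosecond delta up to tens of microseconds and the pixel clock up to hundreds of kHz, so their product is directly a pixel count, then dividing by the line length (h_total) with round-up. A self-contained worked example of that unit wrangling, using an invented 1080p60-class timing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical timing: 148.5 MHz pixel clock (stored in 100 Hz
	 * units, as DC does) and 2200 pixels per line. */
	uint64_t pix_clk_100hz = 1485000;
	uint64_t h_total = 2200;
	uint64_t vsync_delta_ns = 500000;	/* request: 500 us before vsync */

	/* Same arithmetic as calc_vline_position, minus div64_u64. */
	uint64_t req_delta_tens_of_usec = (vsync_delta_ns + 9999) / 10000;
	uint64_t pix_clk_hundreds_khz = (pix_clk_100hz + 999) / 1000;
	uint32_t req_delta_lines = (uint32_t)((req_delta_tens_of_usec *
			pix_clk_hundreds_khz + h_total - 1) / h_total);

	/* (50 * 1485 + 2199) / 2200 = 34 lines, i.e. ~503 us at 14.8 us/line */
	printf("%u lines before vsync\n", req_delta_lines);
	return 0;
}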
@@ -435,7 +435,7 @@ struct optc {
	const struct dcn_optc_shift *tg_shift;
	const struct dcn_optc_mask *tg_mask;

	enum controller_id controller_id;
	int comb_opp_id;

	uint32_t max_h_total;
	uint32_t max_v_total;

@@ -483,9 +483,11 @@ void optc1_program_timing(
		const struct dc_crtc_timing *dc_crtc_timing,
		bool use_vbios);

void optc1_program_vline_interrupt(struct timing_generator *optc,
void optc1_program_vline_interrupt(
		struct timing_generator *optc,
		const struct dc_crtc_timing *dc_crtc_timing,
		enum vline_select vline,
		struct vline_config vline_config);
		const union vline_config *vline_config);

void optc1_program_global_sync(
		struct timing_generator *optc);

@@ -101,6 +101,18 @@ enum gpio_mode dal_gpio_get_mode(
	return gpio->mode;
}

enum gpio_result dal_gpio_lock_pin(
	struct gpio *gpio)
{
	return dal_gpio_service_lock(gpio->service, gpio->id, gpio->en);
}

enum gpio_result dal_gpio_unlock_pin(
	struct gpio *gpio)
{
	return dal_gpio_service_unlock(gpio->service, gpio->id, gpio->en);
}

enum gpio_result dal_gpio_change_mode(
	struct gpio *gpio,
	enum gpio_mode mode)

@@ -192,6 +192,34 @@ static void set_pin_free(
	service->busyness[id][en] = false;
}

enum gpio_result dal_gpio_service_lock(
	struct gpio_service *service,
	enum gpio_id id,
	uint32_t en)
{
	if (!service->busyness[id]) {
		ASSERT_CRITICAL(false);
		return GPIO_RESULT_OPEN_FAILED;
	}

	set_pin_busy(service, id, en);
	return GPIO_RESULT_OK;
}

enum gpio_result dal_gpio_service_unlock(
	struct gpio_service *service,
	enum gpio_id id,
	uint32_t en)
{
	if (!service->busyness[id]) {
		ASSERT_CRITICAL(false);
		return GPIO_RESULT_OPEN_FAILED;
	}

	set_pin_free(service, id, en);
	return GPIO_RESULT_OK;
}

enum gpio_result dal_gpio_service_open(
	struct gpio_service *service,
	enum gpio_id id,

@@ -52,4 +52,14 @@ void dal_gpio_service_close(
	struct gpio_service *service,
	struct hw_gpio_pin **ptr);

enum gpio_result dal_gpio_service_lock(
	struct gpio_service *service,
	enum gpio_id id,
	uint32_t en);

enum gpio_result dal_gpio_service_unlock(
	struct gpio_service *service,
	enum gpio_id id,
	uint32_t en);

#endif

@@ -166,6 +166,10 @@ struct clock_source_funcs {
			struct clock_source *,
			struct pixel_clk_params *,
			struct pll_settings *);
	bool (*get_pixel_clk_frequency_100hz)(
			struct clock_source *clock_source,
			unsigned int inst,
			unsigned int *pixel_clk_khz);
};

struct clock_source {

@@ -153,6 +153,7 @@ struct link_encoder_funcs {
	void (*enable_hpd)(struct link_encoder *enc);
	void (*disable_hpd)(struct link_encoder *enc);
	bool (*is_dig_enabled)(struct link_encoder *enc);
	unsigned int (*get_dig_frontend)(struct link_encoder *enc);
	void (*destroy)(struct link_encoder **enc);
};

@@ -134,7 +134,7 @@ struct dc_crtc_timing;

struct drr_params;

struct vline_config;
union vline_config;


enum vline_select {

@@ -149,9 +149,14 @@ struct timing_generator_funcs {
	void (*program_timing)(struct timing_generator *tg,
			const struct dc_crtc_timing *timing,
			bool use_vbios);
	void (*program_vline_interrupt)(struct timing_generator *optc,
	void (*program_vline_interrupt)(
			struct timing_generator *optc,
			const struct dc_crtc_timing *dc_crtc_timing,
			enum vline_select vline,
			struct vline_config vline_config);
			const union vline_config *vline_config);

	void (*program_vupdate_interrupt)(struct timing_generator *optc,
			const struct dc_crtc_timing *dc_crtc_timing);
	bool (*enable_crtc)(struct timing_generator *tg);
	bool (*disable_crtc)(struct timing_generator *tg);
	bool (*is_counter_moving)(struct timing_generator *tg);

@@ -168,6 +173,8 @@ struct timing_generator_funcs {
	bool (*get_otg_active_size)(struct timing_generator *optc,
			uint32_t *otg_active_width,
			uint32_t *otg_active_height);
	bool (*is_matching_timing)(struct timing_generator *tg,
			const struct dc_crtc_timing *otg_timing);
	void (*set_early_control)(struct timing_generator *tg,
			uint32_t early_cntl);
	void (*wait_for_state)(struct timing_generator *tg,

@@ -70,6 +70,8 @@ struct hw_sequencer_funcs {

	void (*init_hw)(struct dc *dc);

	void (*init_pipes)(struct dc *dc, struct dc_state *context);

	enum dc_status (*apply_ctx_to_hw)(
			struct dc *dc, struct dc_state *context);

@@ -39,8 +39,8 @@ struct vm_helper {
	unsigned int num_vmid;
	unsigned int num_hubp;
	unsigned int num_vmids_available;
	uint64_t *ptb_assigned_to_vmid;
	struct vmid_usage *hubp_vmid_usage;
	uint64_t ptb_assigned_to_vmid[MAX_VMID];
	struct vmid_usage hubp_vmid_usage[MAX_HUBP];
};

uint8_t get_vmid_for_ptb(

@@ -48,7 +48,8 @@ uint8_t get_vmid_for_ptb(
	int64_t ptb,
	uint8_t pipe_idx);

struct vm_helper init_vm_helper(
void init_vm_helper(
	struct vm_helper *vm_helper,
	unsigned int num_vmid,
	unsigned int num_hubp);

@@ -59,6 +59,14 @@ enum gpio_result dal_gpio_change_mode(
	struct gpio *gpio,
	enum gpio_mode mode);

/* Lock Pin */
enum gpio_result dal_gpio_lock_pin(
	struct gpio *gpio);

/* Unlock Pin */
enum gpio_result dal_gpio_unlock_pin(
	struct gpio *gpio);

/* Get the GPIO id */
enum gpio_id dal_gpio_get_id(
	const struct gpio *gpio);

@@ -1765,68 +1765,85 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
{
	struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
	struct dividers dividers;

	struct pwl_float_data *rgb_user = NULL;
	struct pwl_float_data_ex *curve = NULL;
	struct gamma_pixel *axis_x = NULL;
	struct pixel_gamma_point *coeff = NULL;
	enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
	uint32_t i;
	bool ret = false;

	if (input_tf->type == TF_TYPE_BYPASS)
		return false;

	/* we can use hardcoded curve for plain SRGB TF */
	/* we can use hardcoded curve for plain SRGB TF
	 * If linear, it's bypass if on user ramp
	 */
	if (input_tf->type == TF_TYPE_PREDEFINED &&
			input_tf->tf == TRANSFER_FUNCTION_SRGB &&
			!mapUserRamp)
			(input_tf->tf == TRANSFER_FUNCTION_SRGB ||
			input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
			!mapUserRamp)
		return true;

	input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;

	rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
			sizeof(*rgb_user),
			GFP_KERNEL);
	if (!rgb_user)
		goto rgb_user_alloc_fail;
	if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) {
		rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
				sizeof(*rgb_user),
				GFP_KERNEL);
		if (!rgb_user)
			goto rgb_user_alloc_fail;

		axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
				GFP_KERNEL);
		if (!axis_x)
			goto axis_x_alloc_fail;

		dividers.divider1 = dc_fixpt_from_fraction(3, 2);
		dividers.divider2 = dc_fixpt_from_int(2);
		dividers.divider3 = dc_fixpt_from_fraction(5, 2);

		build_evenly_distributed_points(
				axis_x,
				ramp->num_entries,
				dividers);

		scale_gamma(rgb_user, ramp, dividers);
	}

	curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve),
			GFP_KERNEL);
	if (!curve)
		goto curve_alloc_fail;
	axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
			GFP_KERNEL);
	if (!axis_x)
		goto axis_x_alloc_fail;

	coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
			GFP_KERNEL);
	if (!coeff)
		goto coeff_alloc_fail;

	dividers.divider1 = dc_fixpt_from_fraction(3, 2);
	dividers.divider2 = dc_fixpt_from_int(2);
	dividers.divider3 = dc_fixpt_from_fraction(5, 2);

	tf = input_tf->tf;

	build_evenly_distributed_points(
			axis_x,
			ramp->num_entries,
			dividers);

	if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
		scale_gamma(rgb_user, ramp, dividers);
	else if (ramp->type == GAMMA_RGB_FLOAT_1024)
		scale_gamma_dx(rgb_user, ramp, dividers);

	if (tf == TRANSFER_FUNCTION_PQ)
		build_de_pq(curve,
				MAX_HW_POINTS,
				coordinates_x);
	else
	else if (tf == TRANSFER_FUNCTION_SRGB ||
			tf == TRANSFER_FUNCTION_BT709)
		build_degamma(curve,
				MAX_HW_POINTS,
				coordinates_x,
				tf == TRANSFER_FUNCTION_SRGB ? true:false);
				tf == TRANSFER_FUNCTION_SRGB ? true : false);
	else if (tf == TRANSFER_FUNCTION_LINEAR) {
		// just copy coordinates_x into curve
		i = 0;
		while (i != MAX_HW_POINTS + 1) {
			curve[i].r = coordinates_x[i].x;
			curve[i].g = curve[i].r;
			curve[i].b = curve[i].r;
			i++;
		}
	} else
		goto invalid_tf_fail;

	tf_pts->end_exponent = 0;
	tf_pts->x_point_at_y1_red = 1;

@@ -1836,23 +1853,21 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
	map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
			coordinates_x, axis_x, curve,
			MAX_HW_POINTS, tf_pts,
			mapUserRamp && ramp->type != GAMMA_CUSTOM);
	if (ramp->type == GAMMA_CUSTOM)
		apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
			mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);

	ret = true;

invalid_tf_fail:
	kvfree(coeff);
coeff_alloc_fail:
	kvfree(axis_x);
axis_x_alloc_fail:
	kvfree(curve);
curve_alloc_fail:
	kvfree(axis_x);
axis_x_alloc_fail:
	kvfree(rgb_user);
rgb_user_alloc_fail:

	return ret;

}

@@ -47,10 +47,10 @@ static const unsigned char min_reduction_table_v_2_2[13] = {

/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
 *  0    1    2    3    4    5    6    7    8    9    10   11   12
 *  96.1 89.8 85.1 80.3 69.4 64.7 54.9 45.1 30.2 25.1 19.6 12.5 12.5 %
 *  96.1 89.8 74.9 69.4 64.7 52.2 48.6 39.6 30.2 25.1 19.6 12.5 12.5 %
 */
static const unsigned char max_reduction_table_v_2_2[13] = {
0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0x8c, 0x73, 0x4d, 0x40, 0x32, 0x20, 0x20};
0xf5, 0xe5, 0xbf, 0xb1, 0xa5, 0x85, 0x7c, 0x65, 0x4d, 0x40, 0x32, 0x20, 0x20};

/* Predefined ABM configuration sets. We may have different configuration sets
 * in order to satisfy different power/quality requirements.

@@ -67,9 +67,14 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
#define NUM_AGGR_LEVEL 4
#define NUM_POWER_FN_SEGS 8
#define NUM_BL_CURVE_SEGS 16
#define IRAM_RESERVE_AREA_START 0xF0  // reserve 0xF0~0xFF are write by DMCU only
#define IRAM_SIZE 256

#define IRAM_RESERVE_AREA_START_V2 0xF0  // reserve 0xF0~0xF6 are write by DMCU only
#define IRAM_RESERVE_AREA_END_V2 0xF6  // reserve 0xF0~0xF6 are write by DMCU only

#define IRAM_RESERVE_AREA_START_V2_2 0xF0  // reserve 0xF0~0xFF are write by DMCU only
#define IRAM_RESERVE_AREA_END_V2_2 0xFF  // reserve 0xF0~0xFF are write by DMCU only

#pragma pack(push, 1)
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {

@@ -148,8 +153,10 @@ struct iram_table_v_2_2 {
	uint16_t dmcu_version;		/* 0xf4 */
	uint8_t dmcu_state;		/* 0xf6 */

	uint16_t blRampReduction;	/* 0xf7 */
	uint16_t blRampStart;		/* 0xf9 */
	uint8_t dummy1;			/* 0xf7 */
	uint8_t dummy2;			/* 0xf8 */
	uint8_t dummy3;			/* 0xf9 */
	uint8_t dummy4;			/* 0xfa */
	uint8_t dummy5;			/* 0xfb */
	uint8_t dummy6;			/* 0xfc */
	uint8_t dummy7;			/* 0xfd */

@@ -420,11 +427,6 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
	ram_table->deviation_gain[2] = 0xb3;
	ram_table->deviation_gain[3] = 0xb3;

	ram_table->blRampReduction =
		cpu_to_be16(params.backlight_ramping_reduction);
	ram_table->blRampStart =
		cpu_to_be16(params.backlight_ramping_start);

	ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
	ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
	ram_table->min_reduction[2][0] = min_reduction_table_v_2_2[abm_config[set][0]];

@@ -561,6 +563,7 @@ bool dmcu_load_iram(struct dmcu *dmcu,
	struct dmcu_iram_parameters params)
{
	unsigned char ram_table[IRAM_SIZE];
	bool result = false;

	if (dmcu == NULL)
		return false;

@@ -572,10 +575,21 @@ bool dmcu_load_iram(struct dmcu *dmcu,

	if (dmcu->dmcu_version.abm_version == 0x22) {
		fill_iram_v_2_2((struct iram_table_v_2_2 *)ram_table, params);

		result = dmcu->funcs->load_iram(
			dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
	} else {
		fill_iram_v_2((struct iram_table_v_2 *)ram_table, params);

		result = dmcu->funcs->load_iram(
			dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2);

		if (result)
			result = dmcu->funcs->load_iram(
				dmcu, IRAM_RESERVE_AREA_END_V2 + 1,
				(char *)(&ram_table) + IRAM_RESERVE_AREA_END_V2 + 1,
				sizeof(ram_table) - IRAM_RESERVE_AREA_END_V2 - 1);
	}

	return dmcu->funcs->load_iram(
		dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START);
	return result;
}

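Editor's note: the DMCU owns a window of its 256-byte iRAM (0xF0 to 0xF6 for ABM v2, 0xF0 to 0xFF for v2.2), so the driver must not overwrite it; v2.2 simply stops before the window, while v2 now loads in two chunks around it. The offset and length arithmetic, lifted from the diff into a standalone check:

#include <stdio.h>

#define IRAM_SIZE 256
#define IRAM_RESERVE_AREA_START_V2 0xF0
#define IRAM_RESERVE_AREA_END_V2 0xF6

int main(void)
{
	/* First chunk: everything below the DMCU-reserved window. */
	unsigned int len1 = IRAM_RESERVE_AREA_START_V2;	/* 0x00..0xEF, 240 bytes */

	/* Second chunk: everything above it, written at the matching offset. */
	unsigned int off2 = IRAM_RESERVE_AREA_END_V2 + 1;		/* 0xF7 */
	unsigned int len2 = IRAM_SIZE - IRAM_RESERVE_AREA_END_V2 - 1;	/* 9 bytes */

	printf("chunk1: offset 0x00 len %u\n", len1);
	printf("chunk2: offset 0x%02X len %u\n", off2, len2);
	/* 0xF0..0xF6 (7 bytes) stay untouched for the DMCU to write. */
	return 0;
}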
@@ -3579,6 +3579,10 @@ static int vega10_generate_dpm_level_enable_mask(
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.soc_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table));
	data->smc_state_table.soc_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.soc_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Attempt to upload DPM Bootup Levels Failed!",

@@ -3593,6 +3597,9 @@ static int vega10_generate_dpm_level_enable_mask(
	for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;

	for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++)
		data->dpm_table.soc_table.dpm_levels[i].enabled = true;

	return 0;
}

@@ -771,40 +771,47 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
	return 0;
}

/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities. For e.g.
 * Vega20 board in a PCI Gen3 system. In this case, when SMU's tries to switch
 * to DPM1, it fails as system doesn't support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	uint32_t pcie_speed = 0, pcie_width = 0, pcie_arg;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_speed = 16;
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_speed = 8;
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_speed = 5;
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_speed = 2;
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
		pcie_width = 32;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 16;
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 12;
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 8;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	pcie_arg = pcie_width | (pcie_speed << 8);

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
	 */
	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
		PPSMC_MSG_OverridePcieParameters, pcie_arg);
		PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
	PP_ASSERT_WITH_CODE(!ret,
		"[OverridePcieParameters] Attempt to override pcie params failed!",
		return ret);

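Editor's note: the fix replaces raw link-speed/width numbers with the encodings the SMU message actually expects. A self-contained sketch of the bit packing described by the in-code comment; the gen and width codes below (2 = Gen3, 6 = x16) are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Bits 31:16 LCLK DPM level, bits 15:8 PCIe gen (0..3 = Gen1..Gen4),
 * bits 7:0 lane-width code (1..7, x1..x32). */
static uint32_t make_smu_pcie_arg(uint32_t dpm_level, uint32_t pcie_gen,
				  uint32_t pcie_width)
{
	return (dpm_level << 16) | (pcie_gen << 8) | pcie_width;
}

int main(void)
{
	/* Hypothetical Gen3 x16 platform, overriding DPM1. */
	uint32_t arg = make_smu_pcie_arg(1, 2, 6);

	printf("smu_pcie_arg = 0x%08X\n", arg);	/* prints 0x00010206 */
	return 0;
}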
@@ -1611,11 +1618,6 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
		"[EnableDPMTasks] Failed to initialize SMC table!",
		return result);

	result = vega20_override_pcie_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
		"[EnableDPMTasks] Failed to override pcie parameters!",
		return result);

	result = vega20_run_btc(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
		"[EnableDPMTasks] Failed to run btc!",

@@ -1631,6 +1633,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
		"[EnableDPMTasks] Failed to enable all smu features!",
		return result);

	result = vega20_override_pcie_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
		"[EnableDPMTasks] Failed to override pcie parameters!",
		return result);

	result = vega20_notify_smc_display_change(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
		"[EnableDPMTasks] Failed to notify smc display change!",

@@ -523,6 +523,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN	0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT	0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES	0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07

struct drm_amdgpu_cs_chunk {
	__u32		chunk_id;

@@ -565,6 +566,11 @@ union drm_amdgpu_cs {
 * caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)

/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 * This will reset wave ID counters for the IB.
 */
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)

struct drm_amdgpu_cs_chunk_ib {
	__u32 _pad;
	/** AMDGPU_IB_FLAG_* */