Merge tag 'amd-drm-next-5.8-2020-05-12' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.8-2020-05-12:

amdgpu:
- Misc cleanups
- RAS fixes
- Expose FP16 for modesetting
- DP 1.4 compliance test fixes
- Clockgating fixes
- MAINTAINERS update
- Soft recovery for gfx10
- Runtime PM cleanups
- PSP code cleanups

amdkfd:
- Track GPU memory utilization per process
- Report PCI domain in topology

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200512213703.4039-1-alexander.deucher@amd.com
Committed by Dave Airlie on 2020-05-14 13:21:30 +10:00 as commit 49eea1c657.
98 changed files with 1549 additions and 1981 deletions


@ -14063,7 +14063,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: David (ChunMing) Zhou <David1.Zhou@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git git://people.freedesktop.org/~agd5f/linux


@ -765,7 +765,6 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@ -918,7 +917,9 @@ struct amdgpu_device {
struct amdgpu_display_manager dm;
/* discovery */
uint8_t *discovery;
uint8_t *discovery_bin;
uint32_t discovery_tmr_size;
struct amdgpu_bo *discovery_memory;
/* mes */
bool enable_mes;
@ -957,6 +958,7 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
bool in_hibernate;
bool in_gpu_reset;
enum pp_mp1_state mp1_state;


@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
/* todo: add DC handling */
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
#if defined(CONFIG_DRM_AMD_DC)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
amdgpu_device_has_dc_support(adev)) {
struct amdgpu_display_manager *dm = &adev->dm;
struct backlight_device *bd = dm->backlight_dev;
if (bd) {
DRM_DEBUG_DRIVER("Changing brightness to %d\n",
req.backlight_level);
/*
* XXX backlight_device_set_brightness() is
* hardwired to post BACKLIGHT_UPDATE_SYSFS.
* It probably should accept a 'reason' parameter.
*/
backlight_device_set_brightness(bd, req.backlight_level);
}
}
#endif
#endif
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);


@ -65,6 +65,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
bool aql_queue;
bool is_imported;
};
/* KFD Memory Eviction */
@ -148,6 +149,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit);
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@ -219,7 +223,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
void *vm, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem);
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(


@ -1277,7 +1277,7 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem)
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.mem.size;
@ -1286,9 +1286,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct ttm_validate_buffer *bo_list_entry;
unsigned int mapped_to_gpu_memory;
int ret;
bool is_imported = 0;
mutex_lock(&mem->lock);
mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
/* lock is not needed after this, since mem is unused and will
* be freed anyway
@ -1340,8 +1342,19 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
kfree(mem->bo->tbo.sg);
}
/* Update the size of the BO being freed if it was allocated from
* VRAM and is not imported.
*/
if (size) {
if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
(!is_imported))
*size = bo_size;
else
*size = 0;
}
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
drm_gem_object_put_unlocked(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
kfree(mem);
@ -1686,7 +1699,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
drm_gem_object_get(&bo->tbo.base);
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@ -1694,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
amdgpu_sync_create(&(*mem)->sync);
(*mem)->is_imported = true;
return 0;
}
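The new "size" out-parameter above reports how much VRAM the freed BO accounted for (zero for GTT-backed or imported BOs), which is what the "Track GPU memory utilization per process" item in the merge summary builds on. A minimal caller sketch, with the per-process counter and the surrounding context assumed purely for illustration:

	uint64_t size = 0;
	int r;

	/* Free the BO and learn how much VRAM it accounted for. */
	r = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, &size);
	if (!r)
		pdd->vram_usage -= size;	/* hypothetical per-process counter */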


@ -1208,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@ -1258,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);


@ -68,6 +68,7 @@
#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@ -254,6 +255,32 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t hi = ~0;
uint64_t last;
#ifdef CONFIG_64BIT
last = min(pos + size, adev->gmc.visible_vram_size);
if (last > pos) {
void __iomem *addr = adev->mman.aper_base_kaddr + pos;
size_t count = last - pos;
if (write) {
memcpy_toio(addr, buf, count);
mb();
amdgpu_asic_flush_hdp(adev, NULL);
} else {
amdgpu_asic_invalidate_hdp(adev, NULL);
mb();
memcpy_fromio(buf, addr, count);
}
if (count == size)
return;
pos += count;
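/* buf is a uint32_t pointer, so advance it by the byte count in 32-bit words */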
buf += count / 4;
size -= count;
}
#endif
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
for (last = pos + size; pos < last; pos += 4) {
uint32_t tmp = pos >> 31;
@ -2891,6 +2918,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
static const struct attribute *amdgpu_dev_attributes[] = {
&dev_attr_product_name.attr,
&dev_attr_product_number.attr,
&dev_attr_serial_number.attr,
&dev_attr_pcie_replay_count.attr,
NULL
};
/**
* amdgpu_device_init - initialize the driver
*
@ -3240,27 +3275,9 @@ fence_driver_init:
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r) {
dev_err(adev->dev, "Could not create pcie_replay_count");
return r;
}
r = device_create_file(adev->dev, &dev_attr_product_name);
if (r) {
dev_err(adev->dev, "Could not create product_name");
return r;
}
r = device_create_file(adev->dev, &dev_attr_product_number);
if (r) {
dev_err(adev->dev, "Could not create product_number");
return r;
}
r = device_create_file(adev->dev, &dev_attr_serial_number);
if (r) {
dev_err(adev->dev, "Could not create serial_number");
dev_err(adev->dev, "Could not create amdgpu device attr\n");
return r;
}
@ -3343,12 +3360,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
device_remove_file(adev->dev, &dev_attr_product_name);
device_remove_file(adev->dev, &dev_attr_product_number);
device_remove_file(adev->dev, &dev_attr_serial_number);
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@ -4116,6 +4131,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
mutex_unlock(&adev->lock_reset);
}
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
adev->pdev->bus->number, 1);
if (p) {
pm_runtime_enable(&(p->dev));
pm_runtime_resume(&(p->dev));
}
}
static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
enum amd_reset_method reset_method;
struct pci_dev *p = NULL;
u64 expires;
/*
* For now, only BACO and mode1 reset are confirmed to
* suffer from the audio issue if audio is not suspended properly.
*/
reset_method = amdgpu_asic_reset_method(adev);
if ((reset_method != AMD_RESET_METHOD_BACO) &&
(reset_method != AMD_RESET_METHOD_MODE1))
return -EINVAL;
p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
adev->pdev->bus->number, 1);
if (!p)
return -ENODEV;
expires = pm_runtime_autosuspend_expiration(&(p->dev));
if (!expires)
/*
* If we cannot get the audio device's autosuspend delay,
* fall back to a fixed 4s interval; the audio controller's
* default autosuspend delay is 3s, so 4s is guaranteed
* to cover it.
*/
expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
while (!pm_runtime_status_suspended(&(p->dev))) {
if (!pm_runtime_suspend(&(p->dev)))
break;
if (expires < ktime_get_mono_fast_ns()) {
dev_warn(adev->dev, "failed to suspend display audio\n");
/* TODO: abort the succeeding gpu reset? */
return -ETIMEDOUT;
}
}
pm_runtime_disable(&(p->dev));
return 0;
}
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@ -4140,6 +4213,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool use_baco =
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
true : false;
bool audio_suspended = false;
/*
* Flush RAM to disk so that after reboot
@ -4197,6 +4271,19 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return 0;
}
/*
* Try to put the audio codec into suspend state
* before the gpu reset starts.
*
* Because the graphics device shares its power domain
* with the AZ (audio) power domain, resetting the GPU
* without this could change the audio hardware from
* behind the audio driver's back and trigger audio
* codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
@ -4309,6 +4396,8 @@ skip_sched_resume:
/*unlock kfd: SRIOV would do it separately */
if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
if (audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}


@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
uint32_t df_inst);
uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {


@ -133,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
uint64_t pos = vram_size - adev->discovery_tmr_size;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->discovery_tmr_size, false);
return 0;
}
@ -167,17 +168,18 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
if (!adev->discovery_bin)
return -ENOMEM;
r = amdgpu_discovery_read_binary(adev, adev->discovery);
r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
bhdr = (struct binary_header *)adev->discovery;
bhdr = (struct binary_header *)adev->discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@ -190,7 +192,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@ -200,7 +202,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@ -208,7 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@ -218,9 +220,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
ghdr = (struct gpu_info_header *)(adev->discovery + offset);
ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@ -230,16 +232,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
kfree(adev->discovery);
adev->discovery = NULL;
kfree(adev->discovery_bin);
adev->discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
kfree(adev->discovery);
adev->discovery = NULL;
kfree(adev->discovery_bin);
adev->discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@ -263,8 +265,8 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return r;
}
bhdr = (struct binary_header *)adev->discovery;
ihdr = (struct ip_discovery_header *)(adev->discovery +
bhdr = (struct binary_header *)adev->discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
dhdr = (struct die_header *)(adev->discovery + die_offset);
dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->discovery + ip_offset);
ip = (struct ip *)(adev->discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
if (!adev->discovery) {
if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->discovery;
ihdr = (struct ip_discovery_header *)(adev->discovery +
bhdr = (struct binary_header *)adev->discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
dhdr = (struct die_header *)(adev->discovery + die_offset);
dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->discovery + ip_offset);
ip = (struct ip *)(adev->discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
if (!adev->discovery) {
if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->discovery;
gc_info = (struct gc_info_v1_0 *)(adev->discovery +
bhdr = (struct binary_header *)adev->discovery_bin;
gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);


@ -1174,14 +1174,6 @@ static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* GPU comes up enabled by the bios on resume */
if (amdgpu_device_supports_boco(drm_dev) ||
amdgpu_device_supports_baco(drm_dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return amdgpu_device_resume(drm_dev, true);
}
@ -1191,7 +1183,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_dev->dev_private;
int r;
adev->in_hibernate = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_hibernate = false;
if (r)
return r;
return amdgpu_asic_reset(adev);


@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
AMDGPU_GEM_CREATE_VRAM_CLEARED;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];


@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
return bit;
}
void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue)
{
*queue = bit % adev->gfx.mec.num_queue_per_pipe;
@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
/*
* 1. Using pipes 2/3 from MEC 2 seems cause problems.
@ -485,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
return amdgpu_ring_test_helper(kiq_ring);
}
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
int queue_bit)
{
int mec, pipe, queue;
int set_resource_bit = 0;
amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
return set_resource_bit;
}
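To make the mapping concrete: the 4 * 8 and 8 factors reflect a layout of 4 pipes per MEC and 8 queues per pipe, so a queue bit that decodes to mec = 1, pipe = 2, queue = 3 maps to set-resource bit 1 * 32 + 2 * 8 + 3 = 51 (illustrative values, not taken from the patch).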
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@ -507,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
break;
}
queue_mask |= (1ull << i);
queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
}
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,


@ -364,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);


@ -189,10 +189,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
/* only need to skip on ATPX */
if (amdgpu_device_supports_boco(dev) &&
!amdgpu_is_atpx_hybrid())
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);


@ -664,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
return 0;
}
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
return 0;
}
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
int i;
int ret;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
/* Fill in the shared memory with topology information as input */
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to get the topology information */
ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
if (ret)
return ret;
/* Read the output topology information from the shared memory */
topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
for (i = 0; i < topology->num_nodes; i++) {
topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
}
return 0;
}
int psp_xgmi_set_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
int i;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to set topology information */
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
@ -746,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
struct ta_ras_shared_memory *ras_cmd;
int ret;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
if (amdgpu_ras_intr_triggered())
return ret;
if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
{
DRM_WARN("RAS: Unsupported Interface");
return -EINVAL;
}
if (!ret) {
if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
dev_warn(psp->adev->dev, "ECC switch disabled\n");
ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
}
else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
}
return ret;
}
int psp_ras_enable_features(struct psp_context *psp,
@ -836,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
return 0;
}
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info)
{
struct ta_ras_shared_memory *ras_cmd;
int ret;
if (!psp->ras.ras_initialized)
return -EINVAL;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
ras_cmd->ras_in_message.trigger_error = *info;
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
if (ret)
return -EINVAL;
/* If err_event_athub occurs, the error injection was successful;
however, the return status from the TA is no longer reliable */
if (amdgpu_ras_intr_triggered())
return 0;
return ras_cmd->ras_status;
}
// ras end
// HDCP start
@ -1477,7 +1646,7 @@ static int psp_np_fw_load(struct psp_context *psp)
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload(psp);
ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
return ret;


@ -95,16 +95,6 @@ struct psp_funcs
enum psp_ring_type ring_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*ras_trigger_error)(struct psp_context *psp,
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
int (*mem_training_init)(struct psp_context *psp);
void (*mem_training_fini)(struct psp_context *psp);
int (*mem_training)(struct psp_context *psp, uint32_t ops);
@ -316,18 +306,6 @@ struct amdgpu_psp_funcs {
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
#define psp_xgmi_get_node_id(psp, node_id) \
((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
#define psp_xgmi_get_hive_id(psp, hive_id) \
((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_xgmi_set_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_set_topology_info ? \
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
#define psp_mem_training_init(psp) \
((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
#define psp_mem_training_fini(psp) \
@ -335,13 +313,6 @@ struct amdgpu_psp_funcs {
#define psp_mem_training(psp, ops) \
((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define psp_ras_trigger_error(psp, info) \
((psp)->funcs->ras_trigger_error ? \
(psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
#define psp_ras_cure_posion(psp, addr) \
((psp)->funcs->ras_cure_posion ? \
(psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
@ -369,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology);
int psp_xgmi_set_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info);
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);


@ -502,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
}
/* obj end */
void amdgpu_ras_parse_status_code(struct amdgpu_device* adev,
const char* invoke_type,
const char* block_name,
enum ta_ras_status ret)
{
switch (ret) {
case TA_RAS_STATUS__SUCCESS:
return;
case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
dev_warn(adev->dev,
"RAS WARN: %s %s currently unavailable\n",
invoke_type,
block_name);
break;
default:
dev_err(adev->dev,
"RAS ERROR: %s %s error failed ret 0x%X\n",
invoke_type,
block_name,
ret);
}
}
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
struct ras_common_if *head)
@ -565,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input info;
union ta_ras_cmd_input *info;
int ret;
if (!con)
return -EINVAL;
info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (!enable) {
info.disable_features = (struct ta_ras_disable_features_input) {
info->disable_features = (struct ta_ras_disable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
} else {
info.enable_features = (struct ta_ras_enable_features_input) {
info->enable_features = (struct ta_ras_enable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
@ -586,27 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
/* Are we already in the state we are going to set? */
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
return 0;
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
ret = 0;
goto out;
}
if (!amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, &info, enable);
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
dev_err(adev->dev, "RAS ERROR: %s %s feature "
"failed ret %d\n",
enable ? "enable":"disable",
ras_block_str(head->block),
ret);
amdgpu_ras_parse_status_code(adev,
enable ? "enable":"disable",
ras_block_str(head->block),
(enum ta_ras_status)ret);
if (ret == TA_RAS_STATUS__RESET_NEEDED)
return -EAGAIN;
return -EINVAL;
ret = -EAGAIN;
else
ret = -EINVAL;
goto out;
}
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
return 0;
ret = 0;
out:
kfree(info);
return ret;
}
/* Only used in device probe stage and called only once. */
@ -821,10 +854,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
ret = -EINVAL;
}
if (ret)
dev_err(adev->dev, "RAS ERROR: inject %s error failed ret %d\n",
ras_block_str(info->head.block),
ret);
amdgpu_ras_parse_status_code(adev,
"inject",
ras_block_str(info->head.block),
(enum ta_ras_status)ret);
return ret;
}


@ -1957,17 +1957,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
* reserve one TMR (64K) memory at the top of VRAM which holds
* reserve TMR memory at the top of VRAM which holds
* IP Discovery data and is protected by PSP.
*/
r = amdgpu_bo_create_kernel_at(adev,
adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
DISCOVERY_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->discovery_memory,
NULL);
if (r)
return r;
if (adev->discovery_tmr_size > 0) {
r = amdgpu_bo_create_kernel_at(adev,
adev->gmc.real_vram_size - adev->discovery_tmr_size,
adev->discovery_tmr_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->discovery_memory,
NULL);
if (r)
return r;
}
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));


@ -149,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
amdgpu_mem_info_vram_vendor, NULL);
static const struct attribute *amdgpu_vram_mgr_attributes[] = {
&dev_attr_mem_info_vram_total.attr,
&dev_attr_mem_info_vis_vram_total.attr,
&dev_attr_mem_info_vram_used.attr,
&dev_attr_mem_info_vis_vram_used.attr,
&dev_attr_mem_info_vram_vendor.attr,
NULL
};
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
@ -173,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
man->priv = mgr;
/* Add the two VRAM-related sysfs files */
ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_vram_total\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_vram_used\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
return ret;
}
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
if (ret)
DRM_ERROR("Failed to register sysfs\n");
return 0;
}
@ -220,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
return 0;
}


@ -649,31 +649,8 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
uint32_t df_inst_id;
uint64_t dram_base_addr = 0;
const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
if ((!df_funcs) ||
(!df_funcs->get_df_inst_id) ||
(!df_funcs->get_dram_base_addr)) {
dev_warn(adev->dev,
"XGMI: relative phy_addr algorithm is not supported\n");
return addr;
}
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
dev_warn(adev->dev,
"failed to disable DF-Cstate, DF register may not be accessible\n");
return addr;
}
df_inst_id = df_funcs->get_df_inst_id(adev);
dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "failed to enable DF-Cstate\n");
return addr + dram_base_addr;
struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}
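As a worked example with assumed values: if node_segment_size is 0x400000000 (16 GB per node) and physical_node_id is 1, a device-local address of 0x2000 translates to a relative address of 0x400002000, i.e. the node's segment base plus the local offset.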
static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)


@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
uint32_t df_inst)
{
uint32_t base_addr_reg_val = 0;
uint64_t base_addr = 0;
base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
df_inst * DF_3_6_SMN_REG_INST_DIST);
if (REG_GET_FIELD(base_addr_reg_val,
DF_CS_UMC_AON0_DramBaseAddress0,
AddrRngVal) == 0) {
DRM_WARN("address range not valid");
return 0;
}
base_addr = REG_GET_FIELD(base_addr_reg_val,
DF_CS_UMC_AON0_DramBaseAddress0,
DramBaseAddr);
return base_addr << 28;
}
static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
{
uint32_t xgmi_node_id = 0;
uint32_t df_inst_id = 0;
/* Walk through DF dst nodes to find current XGMI node */
for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
df_inst_id * DF_3_6_SMN_REG_INST_DIST);
xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
DF_CS_UMC_AON0_DramLimitAddress0,
DstFabricID);
/* TODO: establish reason dest fabric id is offset by 7 */
xgmi_node_id = xgmi_node_id >> 7;
if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
break;
}
if (df_inst_id == DF_3_6_INST_CNT) {
DRM_WARN("cant match df dst id with gpu node");
return 0;
}
return df_inst_id;
}
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
.get_dram_base_addr = df_v3_6_get_dram_base_addr,
.get_df_inst_id = df_v3_6_get_df_inst_id
};


@ -4577,11 +4577,13 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
/* amdgpu_mm_wreg_mmio_rlc will fall back to mmio if rlcg_write is not supported */
amdgpu_mm_wreg_mmio_rlc(adev, SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
adev->gfx.rlc.clear_state_gpu_addr >> 32, 0);
amdgpu_mm_wreg_mmio_rlc(adev, SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc, 0);
amdgpu_mm_wreg_mmio_rlc(adev, SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
adev->gfx.rlc.clear_state_size, 0);
return 0;
}
@ -5190,7 +5192,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
amdgpu_mm_wreg_mmio_rlc(adev, SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL), tmp, 0);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@ -6540,14 +6542,16 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded firstly, so in direct type, it has to load smc ucode
* here before rlc.
*/
r = smu_load_microcode(&adev->smu);
if (r)
return r;
if (adev->smu.ppt_funcs != NULL) {
r = smu_load_microcode(&adev->smu);
if (r)
return r;
r = smu_check_fw_status(&adev->smu);
if (r) {
pr_err("SMC firmware status is not correct\n");
return r;
r = smu_check_fw_status(&adev->smu);
if (r) {
pr_err("SMC firmware status is not correct\n");
return r;
}
}
}
@ -7011,7 +7015,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
/* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
}
if (adev->cg_flags &
@ -7086,6 +7090,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
};
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
.unset_safe_mode = gfx_v10_0_unset_safe_mode,
.init = gfx_v10_0_rlc_init,
.get_csb_size = gfx_v10_0_get_csb_size,
.get_csb_buffer = gfx_v10_0_get_csb_buffer,
.resume = gfx_v10_0_rlc_resume,
.stop = gfx_v10_0_rlc_stop,
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
@ -7102,11 +7120,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
if (!enable) {
amdgpu_gfx_off_ctrl(adev, false);
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
} else
amdgpu_gfx_off_ctrl(adev, true);
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@ -7672,6 +7686,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
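/* apply the SQ command to the waves owned by the given VMID */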
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@ -8063,6 +8090,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@ -8183,9 +8211,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
break;
default:
break;
}


@ -507,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@ -1233,6 +1233,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
{ 0, 0, 0, 0, 0 },
};
@ -5039,10 +5041,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
if (!enable) {
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
}
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@ -5066,12 +5067,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
amdgpu_gfx_off_ctrl(adev, true);
break;
case CHIP_VEGA12:
if (!enable) {
amdgpu_gfx_off_ctrl(adev, false);
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
} else {
amdgpu_gfx_off_ctrl(adev, true);
}
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;


@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
static const u32 crtc_offsets[6] =
{
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
SI_CRTC2_REGISTER_OFFSET,
SI_CRTC3_REGISTER_OFFSET,
SI_CRTC4_REGISTER_OFFSET,
SI_CRTC5_REGISTER_OFFSET
};
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;


@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (CIK).
*/

View File

@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (VI).
*/


@ -524,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
return 0;
}
/* TODO: Fill in the following functions once the PSP firmware interface for XGMI is ready.
* For now, return success and hack the hive_id so high level code can
* start testing
*/
static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
int i;
int ret;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
/* Fill in the shared memory with topology information as input */
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to get the topology information */
ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
if (ret)
return ret;
/* Read the output topology information from the shared memory */
topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
for (i = 0; i < topology->num_nodes; i++) {
topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
}
return 0;
}
static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
int i;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to set topology information */
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
return 0;
}
static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
return 0;
}
static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info)
{
struct ta_ras_shared_memory *ras_cmd;
int ret;
if (!psp->ras.ras_initialized)
return -EINVAL;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
ras_cmd->ras_in_message.trigger_error = *info;
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
if (ret)
return -EINVAL;
/* If err_event_athub occurs, the error injection was successful;
however, the return status from the TA is no longer reliable */
if (amdgpu_ras_intr_triggered())
return 0;
return ras_cmd->ras_status;
}
static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
{
#if 0
// not support yet.
struct ta_ras_shared_memory *ras_cmd;
int ret;
if (!psp->ras.ras_initialized)
return -EINVAL;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
if (ret)
return -EINVAL;
return ras_cmd->ras_status;
#else
return -EINVAL;
#endif
}
static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
{
return psp_rlc_autoload_start(psp);
}
static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
int ret;
@ -995,13 +820,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
.mode1_reset = psp_v11_0_mode1_reset,
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
.mem_training_init = psp_v11_0_memory_training_init,
.mem_training_fini = psp_v11_0_memory_training_fini,
.mem_training = psp_v11_0_memory_training,


@ -24,6 +24,8 @@
#ifndef _TA_RAS_IF_H
#define _TA_RAS_IF_H
#define RAS_TA_HOST_IF_VER 0
/* Responses have bit 31 set */
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@ -36,18 +38,24 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
enum ta_ras_status {
TA_RAS_STATUS__SUCCESS = 0x00,
TA_RAS_STATUS__RESET_NEEDED = 0x01,
TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05,
TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0x06,
TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0x07,
TA_RAS_STATUS__ERROR_TIMEOUT = 0x08,
TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0x09,
TA_RAS_STATUS__ERROR_GENERIC = 0x10,
enum ta_ras_status
{
TA_RAS_STATUS__SUCCESS = 0x00,
TA_RAS_STATUS__RESET_NEEDED = 0xA001,
TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0xA005,
TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
TA_RAS_STATUS__ERROR_TIMEOUT = 0xA008,
TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0XA009,
TA_RAS_STATUS__ERROR_GENERIC = 0xA00A,
TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
};
enum ta_ras_block {
@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
uint64_t value; // method of error injection, i.e. persistent, coherent, etc.
};
struct ta_ras_output_flags
{
uint8_t ras_init_success_flag;
uint8_t err_inject_switch_disable_flag;
uint8_t reg_access_failure_flag;
};
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
uint32_t reserve_pad[256];
};
union ta_ras_cmd_output
{
struct ta_ras_output_flags flags;
uint32_t reserve_pad[256];
};
/* Shared Memory structures */
/**********************************************************/
struct ta_ras_shared_memory {
uint32_t cmd_id;
uint32_t resp_id;
enum ta_ras_status ras_status;
uint32_t reserved;
union ta_ras_cmd_input ras_in_message;
uint32_t cmd_id;
uint32_t resp_id;
uint32_t ras_status;
uint32_t if_version;
union ta_ras_cmd_input ras_in_message;
union ta_ras_cmd_output ras_out_message;
};
#endif // _TA_RAS_IF_H


@ -1845,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v2_0_cmd_end end = { {0} };
struct mmsch_v2_0_init_header *header;
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@ -1855,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
direct_poll.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {

View File

@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
}
@ -177,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
@ -399,46 +399,46 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
continue;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
/* cache window 1: stack */
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
/* non-cache window */
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_SIZE0,
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
}
@ -452,91 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@ -690,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
@ -774,13 +774,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
uint32_t rb_bufsz, tmp;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
if (indirect)
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@ -793,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@ -809,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_CNTL),
VCN, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXA0),
VCN, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUXB0),
VCN, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MPC_SET_MUX),
VCN, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@ -838,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_MASTINT_EN),
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
@ -873,39 +873,39 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
/* Stall DPG before WPTR/RPTR reset */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* set the write pointer delay */
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for the ring buffer */
WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
@ -929,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* set uvd status busy */
tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@ -947,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
/* disable master interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* setup mmUVD_LMI_CTRL */
tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
tmp &= ~0xff;
WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
/* setup mmUVD_MPC_CNTL */
tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
/* setup UVD_MPC_SET_MUXA0 */
WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
/* setup UVD_MPC_SET_MUXB0 */
WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
/* setup mmUVD_MPC_SET_MUX */
WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@ -997,27 +997,27 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* unblock VCPU register access */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
for (k = 0; k < 10; ++k) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
if (amdgpu_emu_mode == 1)
@ -1030,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
@ -1047,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
ring = &adev->vcn.inst[i].ring_dec;
/* force RBC into idle state */
@ -1065,39 +1065,39 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for the ring buffer */
WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[1];
WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
}
@ -1118,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
* memory descriptor location
*/
WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
/*
* 5, kick off the initialization and wait until
* VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop = 10;
while ((data & 0x10000002) != 0x10000002) {
udelay(100);
data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
@ -1167,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
header->version = MMSCH_VERSION;
@ -1189,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
table_size = 0;
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
size);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
AMDGPU_VCN_STACK_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
ring->ring_size / 4);
ring = &adev->vcn.inst[i].ring_dec;
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
@ -1287,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@ -1308,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
uint32_t tmp;
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* wait for read ptr to be equal to write ptr */
tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
return 0;
@ -1369,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
return r;
/* block VCPU register access */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
/* reset VCPU */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
/* disable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~(UVD_VCPU_CNTL__CLK_EN_MASK));
/* clear status */
@ -1388,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
vcn_v2_5_enable_clock_gating(adev);
/* enable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
@ -1410,11 +1408,11 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
@ -1422,15 +1420,15 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
/* Stall DPG before WPTR/RPTR reset */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@ -1438,39 +1436,39 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
ring->wptr = 0;
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
ring->wptr = 0;
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
@ -1490,7 +1488,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
@ -1507,7 +1505,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
@ -1522,14 +1520,14 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
lower_32_bits(ring->wptr) | 0x80000000);
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
@ -1575,9 +1573,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
@ -1595,12 +1593,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
} else {
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
}
}
@ -1620,14 +1618,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
}
} else {
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
}
}
}
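
A note on why the UVD-to-VCN renames above are safe: in the SOC15 register helpers the IP name only selects a row in the per-device offset table, and VCN_HWIP aliases UVD_HWIP, so both spellings resolve to the same registers; the change simply makes the VCN code self-describing. Below is a standalone sketch of that lookup pattern; the enum values and base offsets are illustrative, not the real tables.

#include <stdio.h>

/* Simplified model of the reg_offset[ip##_HWIP][inst] indexing. */
enum hw_ip { UVD_HWIP = 1, VCN_HWIP = UVD_HWIP, NUM_HWIP };

static const unsigned int reg_base[NUM_HWIP][2] = {
	[UVD_HWIP] = { 0x7800, 0x7c00 },	/* made-up per-instance bases */
};

#define IP_BASE(ip, inst) (reg_base[ip##_HWIP][inst])

int main(void)
{
	/* Both names index the same row, so the rename changes nothing. */
	printf("UVD inst0: %#x, VCN inst0: %#x\n",
	       IP_BASE(UVD, 0), IP_BASE(VCN, 0));
	return 0;
}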

View File

@ -1323,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_free;
}
/* Update the VRAM usage count */
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
mutex_unlock(&p->mutex);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@ -1338,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@ -1352,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
void *mem;
struct kfd_dev *dev;
int ret;
uint64_t size = 0;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@ -1374,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
}
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
(struct kgd_mem *)mem);
(struct kgd_mem *)mem, &size);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@ -1383,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
err_unlock:
mutex_unlock(&p->mutex);
return ret;
@ -1727,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return r;
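
The accounting pattern introduced above is worth spelling out: the allocation path bumps pdd->vram_usage by the requested size, and the free path subtracts whatever amdgpu_amdkfd_gpuvm_free_memory_of_gpu reports back through its new optional size out-parameter, while callers that do not track usage simply pass NULL. A small sketch of that out-parameter shape, using hypothetical types and helpers rather than the in-tree ones:

#include <stdint.h>

/* Illustrative only; not the in-tree free path. */
struct example_bo {
	uint64_t bytes;
};

static void example_release(struct example_bo *bo) { /* ... */ }

static int example_free(struct example_bo *bo, uint64_t *size)
{
	if (size)
		*size = bo->bytes;	/* report before the object goes away */
	example_release(bo);
	return 0;
}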

View File

@ -1089,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
break;
}
res.queue_mask |= (1ull << i);
res.queue_mask |= 1ull
<< amdgpu_queue_mask_bit_to_set_resource_bit(
(struct amdgpu_device *)dqm->dev->kgd, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

View File

@ -629,6 +629,8 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
#define MAX_VRAM_FILENAME_LEN 11
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@ -671,6 +673,11 @@ struct kfd_process_device {
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
/* VRAM usage */
uint64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_VRAM_FILENAME_LEN];
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
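
The buffer size chosen above works out as strlen("vram_") = 5, plus up to 5 decimal digits of GPU id, plus the terminating NUL: 5 + 5 + 1 = 11 bytes. The 5-digit assumption is inferred from the constant rather than stated in the patch; snprintf() in kfd_procfs_add_vram_usage() would truncate safely if an id ever needed more digits.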

View File

@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
int val = 0;
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
val = p->pasid;
return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
if (pdd)
return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
return snprintf(buffer, PAGE_SIZE, "%d\n", val);
return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
int kfd_procfs_add_vram_usage(struct kfd_process *p)
{
int ret = 0;
struct kfd_process_device *pdd;
if (!p)
return -EINVAL;
if (!p->kobj)
return -EFAULT;
/* Create proc/<pid>/vram_<gpuid> file for each GPU */
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
pdd->dev->id);
pdd->attr_vram.name = pdd->vram_filename;
pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&pdd->attr_vram);
ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
if (ret)
pr_warn("Creating vram usage for gpu id %d failed",
(int)pdd->dev->id);
}
return ret;
}
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@ -312,7 +344,7 @@ sync_memory_failed:
return err;
err_map_mem:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
ret = kfd_procfs_add_vram_usage(process);
if (ret)
pr_warn("Creating vram usage file for pid %d failed",
(int)process->lead_thread->pid);
}
out:
if (!IS_ERR(process))
@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
struct kfd_process_device *pdd;
/* Remove the procfs files */
if (p->kobj) {
@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
sysfs_remove_file(p->kobj, &pdd->attr_vram);
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@ -863,6 +905,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@ -1079,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
return p;
}
/* process_evict_queues - Evict all user queues of a process
/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
@ -1119,7 +1162,7 @@ fail:
return r;
}
/* process_restore_queues - Restore all user queues of a process */
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
struct kfd_process_device *pdd;
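
With the hooks above in place, each process gets one vram_<gpuid> file alongside its existing pasid and queues entries. A user-space sketch for reading it follows; the /sys/class/kfd/kfd/proc/<pid>/ path is an assumption based on where the KFD proc kobjects are registered, and the pid and gpu id values are placeholders.

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t bytes = 0;
	FILE *f = fopen("/sys/class/kfd/kfd/proc/1234/vram_5678", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%" SCNu64, &bytes) == 1)
		printf("VRAM in use: %" PRIu64 " bytes\n", bytes);
	fclose(f);
	return 0;
}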

View File

@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
sysfs_show_32bit_prop(buffer, "domain",
dev->node_props.domain);
sysfs_show_32bit_prop(buffer, "drm_render_minor",
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, "hive_id",
@ -1306,6 +1308,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =

View File

@ -80,6 +80,7 @@ struct kfd_node_properties {
uint32_t vendor_id;
uint32_t device_id;
uint32_t location_id;
uint32_t domain;
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
int32_t drm_render_minor;
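
Taken together with location_id, which packs bus and devfn the way pci_dev_id() does, the new domain property lets user space reconstruct a node's full PCI address. A small sketch, with placeholder values standing in for what would be parsed from the topology properties file:

#include <stdio.h>

int main(void)
{
	unsigned int domain = 0x0000;		/* "domain" property */
	unsigned int location_id = 0x0300;	/* "location_id": bus 3, devfn 0 */
	unsigned int bus = (location_id >> 8) & 0xff;
	unsigned int devfn = location_id & 0xff;

	printf("%04x:%02x:%02x.%x\n", domain, bus, devfn >> 3, devfn & 7);
	return 0;
}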

View File

@ -30,7 +30,7 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
/**
* dm_crtc_high_irq() - Handles CRTC interrupt
* @interrupt_params: ignored
* @interrupt_params: used for determining the CRTC instance
*
* Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
* event handler.
@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at start of front-porch is only possible
* in non-vrr mode, as only there vblank timestamping will give
* valid results while done in front-porch. Otherwise defer it
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!amdgpu_dm_vrr_active(acrtc_state))
drm_crtc_handle_vblank(&acrtc->base);
/* Following stuff must happen at start of vblank, for crc
* computation and below-the-range btr support in vrr mode.
*/
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
&acrtc_state->vrr_params);
dc_stream_adjust_vmin_vmax(
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
* dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
* @interrupt_params: interrupt parameters
*
* Notify DRM's vblank event handler at VSTARTUP
*
* Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
* * We are close enough to VUPDATE - the point of no return for hw
* * We are in the fixed portion of variable front porch when vrr is enabled
* * We are before VUPDATE, where double-buffered vrr registers are swapped
*
* It is therefore the correct place to signal vblank, send user flip events,
* and update VRR.
*/
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (!acrtc)
return;
@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
/**
* Core vblank handling at start of front-porch is only possible
* in non-vrr mode, as only there vblank timestamping will give
* valid results while done in front-porch. Otherwise defer it
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!amdgpu_dm_vrr_active(acrtc_state))
drm_crtc_handle_vblank(&acrtc->base);
/**
* Following stuff must happen at start of vblank, for crc
* computation and below-the-range btr support in vrr mode.
*/
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
drm_crtc_handle_vblank(&acrtc->base);
/* BTR updates need to happen before VUPDATE on Vega and above. */
if (adev->family < AMDGPU_FAMILY_AI)
return;
spin_lock_irqsave(&adev->ddev->event_lock, flags);
if (acrtc_state->vrr_params.supported &&
if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
&acrtc_state->vrr_params);
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc_state->stream,
&acrtc_state->vrr_params);
dc_stream_adjust_vmin_vmax(
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
}
/*
@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
* avoid race conditions between flip programming and completion,
* which could cause too early flip completion events.
*/
if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
if (adev->family >= AMDGPU_FAMILY_RV &&
acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@ -1389,9 +1338,14 @@ static int dm_late_init(void *handle)
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
struct dmcu *dmcu = NULL;
bool ret = false;
if (!adev->dm.fw_dmcu)
return detect_mst_link_for_all_connectors(adev->ddev);
dmcu = adev->dm.dc->res_pool->dmcu;
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;
@ -1571,7 +1525,6 @@ static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
@ -1583,7 +1536,7 @@ static int dm_suspend(void *handle)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return ret;
return 0;
}
static struct amdgpu_dm_connector *
@ -2013,17 +1966,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
if (aconnector->dc_link->aux_mode) {
drm_dp_cec_unset_edid(
&aconnector->dm_dp_aux.aux);
}
} else {
aconnector->edid =
(struct edid *) sink->dc_edid.raw_edid;
(struct edid *)sink->dc_edid.raw_edid;
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else {
@ -2445,8 +2403,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(
adev, &int_params, dm_crtc_high_irq, c_irq_params);
}
/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
* to trigger at end of each vblank, regardless of state of the lock,
* matching DCE behaviour.
*/
for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
DRM_ERROR("Failed to add vupdate irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_dcn_crtc_high_irq, c_irq_params);
dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@ -3661,6 +3647,10 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_P010:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
break;
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@ -4458,10 +4448,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
/* Do not set vupdate for DCN hardware */
if (adev->family > AMDGPU_FAMILY_AI)
return 0;
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@ -5577,6 +5563,10 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_NV12;
if (plane_cap && plane_cap->pixel_format_support.p010)
formats[num_formats++] = DRM_FORMAT_P010;
if (plane_cap && plane_cap->pixel_format_support.fp16) {
formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
}
break;
case DRM_PLANE_TYPE_OVERLAY:
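With the fp16 formats exposed above, userspace can scan out half-float buffers. A minimal libdrm sketch of wrapping such a buffer in a KMS framebuffer follows; the helper name and the pre-allocated 64bpp buffer handle are illustrative assumptions, not part of this patch, and it assumes the libdrm headers are available:

#include <stdint.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

/* Illustrative only: expose an existing 64bpp (fp16) buffer object as a
 * framebuffer that can then be assigned to an fp16-capable primary plane. */
static int add_fp16_fb(int fd, uint32_t w, uint32_t h,
                       uint32_t bo_handle, uint32_t *fb_id)
{
        uint32_t handles[4] = { bo_handle };
        uint32_t pitches[4] = { w * 8 };  /* 4 channels x 16 bits = 8 bytes/pixel */
        uint32_t offsets[4] = { 0 };

        return drmModeAddFB2(fd, w, h, DRM_FORMAT_XRGB16161616F,
                             handles, pitches, offsets, fb_id, 0);
}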
@ -6865,7 +6855,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_state);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
@ -7895,6 +7885,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@ -7904,9 +7895,30 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
/*TODO Implement atomic check for cursor plane */
if (plane->type == DRM_PLANE_TYPE_CURSOR)
/*TODO Implement better atomic check for cursor plane */
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
if (!enable || !new_plane_crtc ||
drm_atomic_plane_disabling(plane->state, new_plane_state))
return 0;
new_acrtc = to_amdgpu_crtc(new_plane_crtc);
if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
new_plane_state->crtc_w, new_plane_state->crtc_h);
return -EINVAL;
}
if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
new_plane_state->crtc_x, new_plane_state->crtc_y);
return -EINVAL;
}
return 0;
}
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
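The new cursor checks above boil down to two conditions: the cursor must not exceed the hardware maximum size, and it must not sit entirely above or to the left of the CRTC. A standalone restatement, illustrative only and not code from this patch:

#include <stdbool.h>
#include <stdint.h>

/* A cursor state passes the new atomic check when it fits the hw limits and
 * at least part of it can still overlap the CRTC (x > -max_w, y > -max_h). */
static bool cursor_state_valid(int32_t x, int32_t y, uint32_t w, uint32_t h,
                               uint32_t max_w, uint32_t max_h)
{
        if (w > max_w || h > max_h)
                return false;
        if (x <= -(int32_t)max_w || y <= -(int32_t)max_h)
                return false;
        return true;
}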
@ -8640,10 +8652,10 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
if (dpcd_data[0] == 0) {
link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
link->psr_settings.psr_feature_enabled = false;
} else {
link->psr_settings.psr_version = PSR_VERSION_1;
link->psr_settings.psr_version = DC_PSR_VERSION_1;
link->psr_settings.psr_feature_enabled = true;
}
@ -8662,14 +8674,12 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
struct dc_link *link = NULL;
struct psr_config psr_config = {0};
struct psr_context psr_context = {0};
struct dc *dc = NULL;
bool ret = false;
if (stream == NULL)
return false;
link = stream->link;
dc = link->ctx->dc;
psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

View File

@ -32,7 +32,7 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
#include "dmub/inc/dmub_srv.h"
#include "dmub/dmub_srv.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;

View File

@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
memset(display, 0, sizeof(*display));
memset(link, 0, sizeof(*link));
display->index = aconnector->base.index;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
return;
}
memset(display, 0, sizeof(*display));
memset(link, 0, sizeof(*link));
display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL)

View File

@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_sink *sink)
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
edid = drm_get_edid(&aconnector->base, ddc);
/* DP Compliance Test 4.2.2.6 */
if (link->aux_mode && connector->edid_corrupt)
drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
if (!edid && connector->edid_corrupt) {
connector->edid_corrupt = false;
return EDID_BAD_CHECKSUM;
}
if (!edid)
return EDID_NO_RESPONSE;
@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
if (link->aux_mode) {
union test_request test_request = { {0} };
union test_response test_response = { {0} };
dm_helpers_dp_read_dpcd(ctx,
link,
DP_TEST_REQUEST,
&test_request.raw,
sizeof(union test_request));
if (!test_request.bits.EDID_READ)
return edid_status;
test_response.bits.EDID_CHECKSUM_WRITE = 1;
dm_helpers_dp_write_dpcd(ctx,
link,
DP_TEST_EDID_CHECKSUM,
&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
1);
dm_helpers_dp_write_dpcd(ctx,
link,
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
}
/* DP Compliance Test 4.2.2.3 */
if (link->aux_mode)
drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
return edid_status;
}
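For reference, the byte handed to drm_dp_send_real_edid_checksum() above is simply the last byte of the EDID block: EDID defines the checksum so that all 128 bytes of a block sum to 0 modulo 256. A standalone sketch (illustrative, not part of this patch):

#include <stdint.h>

/* The last byte of a valid 128-byte EDID block makes the whole block sum to
 * 0 (mod 256); this is the value DP compliance test 4.2.2.3 wants echoed. */
static uint8_t edid_block_checksum(const uint8_t block[128])
{
        uint8_t sum = 0;
        int i;

        for (i = 0; i < 127; i++)
                sum += block[i];
        return (uint8_t)(0x100 - sum);  /* equals block[127] when the block is valid */
}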

View File

@ -66,7 +66,7 @@
#include "dce/dce_i2c.h"
#include "dmub/inc/dmub_cmd_dal.h"
#include "dmub/dmub_srv.h"
#define CTX \
dc->ctx
@ -839,11 +839,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
int count = 0;
struct pipe_ctx *pipe;
PERF_TRACE();
for (i = 0; i < MAX_PIPES; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
@ -2210,9 +2209,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (should_program_abm) {
if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(
pipe_ctx->stream_res.abm,
pipe_ctx->stream->link->panel_cntl->inst);
dc->hwss.set_abm_immediate_disable(pipe_ctx);
} else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);

View File

@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "dm_services.h"
#include "atom.h"
#include "atomfirmware.h"
#include "dm_helpers.h"
#include "dc.h"
#include "grph_object_id.h"
@ -46,7 +46,7 @@
#include "dmcu.h"
#include "hw/clk_mgr.h"
#include "dce/dmub_psr.h"
#include "dmub/inc/dmub_cmd_dal.h"
#include "dmub/dmub_srv.h"
#include "inc/hw/panel_cntl.h"
#define DC_LOGGER_INIT(logger)
@ -1552,7 +1552,7 @@ static bool dc_link_construct(struct dc_link *link,
*/
program_hpd_filter(link);
link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
return true;
device_tag_fail:
@ -2504,59 +2504,56 @@ int dc_link_get_target_backlight_pwm(const struct dc_link *link)
return (int) abm->funcs->get_target_backlight(abm);
}
static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
{
int i;
struct dc *dc = link->ctx->dc;
struct pipe_ctx *pipe_ctx = NULL;
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
break;
}
}
}
return pipe_ctx;
}
bool dc_link_set_backlight_level(const struct dc_link *link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
struct dc *dc = link->ctx->dc;
int i;
DC_LOGGER_INIT(link->ctx->logger);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
if (dc_is_embedded_signal(link->connector_signal)) {
struct pipe_ctx *pipe_ctx = NULL;
struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (dc->current_state->res_ctx.
pipe_ctx[i].stream->link
== link) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
*/
if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
frame_ramp = 0;
}
}
}
if (pipe_ctx == NULL)
if (pipe_ctx) {
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
*/
if (pipe_ctx->plane_state == NULL)
frame_ramp = 0;
} else {
ASSERT(false);
return false;
}
dc->hwss.set_backlight_level(
pipe_ctx,
backlight_pwm_u16_16,
frame_ramp);
}
return true;
}
bool dc_link_set_abm_disable(const struct dc_link *link)
{
struct abm *abm = get_abm_from_stream_res(link);
bool success = false;
if (abm)
success = abm->funcs->set_abm_immediate_disable(abm, link->panel_cntl->inst);
return success;
}
bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
struct dc *dc = link->ctx->dc;

View File

@ -13,7 +13,6 @@
#include "core_status.h"
#include "dpcd_defs.h"
#include "resource.h"
#define DC_LOGGER \
link->ctx->logger

View File

@ -1547,35 +1547,6 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
if (cur_stream == NULL)
return true;
if (memcmp(&cur_stream->hdr_static_metadata,
&new_stream->hdr_static_metadata,
sizeof(struct dc_info_packet)) != 0)
return true;
return false;
}
static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
if (cur_stream == NULL)
return true;
if (memcmp(&cur_stream->vsc_infopacket,
&new_stream->vsc_infopacket,
sizeof(struct dc_info_packet)) != 0)
return true;
return false;
}
static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@ -1610,15 +1581,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
if (is_hdr_static_meta_changed(stream_a, stream_b))
return false;
if (stream_a->dpms_off != stream_b->dpms_off)
return false;
if (is_vsc_info_packet_changed(stream_a, stream_b))
return false;
return true;
}
@ -1758,21 +1723,6 @@ static struct audio *find_first_free_audio(
return 0;
}
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream)
{
int i;
for (i = 0; i < old_context->stream_count; i++) {
struct dc_stream_state *old_stream = old_context->streams[i];
if (are_stream_backends_same(old_stream, stream))
return true;
}
return false;
}
/**
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
@ -2027,17 +1977,6 @@ enum dc_status resource_map_pool_resources(
int pipe_idx = -1;
struct dc_bios *dcb = dc->ctx->dc_bios;
/* TODO Check if this is needed */
/*if (!resource_is_stream_unchanged(old_context, stream)) {
if (stream != NULL && old_context->streams[i] != NULL) {
stream->bit_depth_params =
old_context->streams[i]->bit_depth_params;
stream->clamping = old_context->streams[i]->clamping;
continue;
}
}
*/
calculate_phy_pix_clks(stream);
/* TODO: Check Linux */
@ -2720,15 +2659,9 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
return true;
if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
if (false == pipe_ctx_old->stream->link->link_state_valid &&
false == pipe_ctx_old->stream->dpms_off)
return true;

View File

@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.2.83.1"
#define DC_VER "3.2.84"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -277,6 +277,7 @@ struct dc_config {
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
};
enum visual_confirm {
@ -476,6 +477,7 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
bool enable_dram_clock_change_one_display_vactive;
};
struct dc_debug_data {

View File

@ -25,7 +25,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/inc/dmub_srv.h"
#include "../dmub/dmub_srv.h"
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)

View File

@ -27,7 +27,7 @@
#define _DMUB_DC_SRV_H_
#include "os_types.h"
#include "dmub/inc/dmub_cmd.h"
#include "dmub/dmub_srv.h"
struct dmub_srv;

View File

@ -29,7 +29,6 @@
#include "dc.h"
#include "dc_types.h"
#include "grph_object_defs.h"
#include "dmub/inc/dmub_cmd_dal.h"
enum dc_link_fec_state {
dc_link_fec_not_ready,
@ -72,7 +71,7 @@ struct link_trace {
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
bool psr_allow_active; // PSR is currently active
enum psr_version psr_version; // Internal PSR version, determined based on DPCD
enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
/* These parameters are calculated in Driver,
* based on display timing and Sink capabilities.
@ -220,8 +219,6 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);

View File

@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
#endif /* DC_TYPES_H_ */

View File

@ -83,120 +83,6 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t p
return true;
}
static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
{
uint64_t current_backlight;
uint32_t round_result;
uint32_t pwm_period_cntl, bl_period, bl_int_count;
uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
if (bl_int_count == 0)
bl_int_count = 16;
bl_period_mask = (1 << bl_int_count) - 1;
bl_period &= bl_period_mask;
bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
if (fractional_duty_cycle_en == 0)
bl_pwm &= bl_pwm_mask;
else
bl_pwm &= 0xFFFF;
current_backlight = bl_pwm << (1 + bl_int_count);
if (bl_period == 0)
bl_period = 0xFFFF;
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
current_backlight = (uint64_t)(current_backlight) * bl_period;
round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
round_result = (round_result >> (bl_int_count-1)) & 1;
current_backlight >>= bl_int_count;
current_backlight += round_result;
return (uint32_t)(current_backlight);
}
static void driver_set_backlight_level(struct dce_abm *abm_dce,
uint32_t backlight_pwm_u16_16)
{
uint32_t backlight_16bit;
uint32_t masked_pwm_period;
uint8_t bit_count;
uint64_t active_duty_cycle;
uint32_t pwm_period_bitcnt;
/*
* 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
* active duty cycle <= backlight period
*/
/* 1.1 Apply bitmask for backlight period value based on value of BITCNT
*/
REG_GET_2(BL_PWM_PERIOD_CNTL,
BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
BL_PWM_PERIOD, &masked_pwm_period);
if (pwm_period_bitcnt == 0)
bit_count = 16;
else
bit_count = pwm_period_bitcnt;
/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
/* 1.2 Calculate integer active duty cycle required upper 16 bits
* contain integer component, lower 16 bits contain fractional component
* of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
*/
active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
/* 1.3 Calculate 16 bit active duty cycle from integer and fractional
* components shift by bitCount then mask 16 bits and add rounding bit
* from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
*/
backlight_16bit = active_duty_cycle >> bit_count;
backlight_16bit &= 0xFFFF;
backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
/*
* 2. Program register with updated value
*/
/* 2.1 Lock group 2 backlight registers */
REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
BL_PWM_GRP1_REG_LOCK, 1);
// 2.2 Write new active duty cycle
REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
/* 2.3 Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
/* 3 Wait for pending bit to be cleared */
REG_WAIT(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
1, 10000);
}
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
uint32_t backlight_pwm_u16_16,
@ -249,10 +135,9 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
static void dce_abm_init(struct abm *abm)
static void dce_abm_init(struct abm *abm, uint32_t backlight)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@ -334,85 +219,11 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
if (abm->dmcu_is_running == false)
return true;
dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
abm->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
return true;
}
static bool dce_abm_init_backlight(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t value;
/* It must not be 0, so we have to restore them
* Bios bug w/a - period resets to zero,
* restoring to cache values which is always correct
*/
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
if (value == 0 || value == 1) {
if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
REG_WRITE(BL_PWM_CNTL,
abm->stored_backlight_registers.BL_PWM_CNTL);
REG_WRITE(BL_PWM_CNTL2,
abm->stored_backlight_registers.BL_PWM_CNTL2);
REG_WRITE(BL_PWM_PERIOD_CNTL,
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
BL_PWM_REF_DIV,
abm->stored_backlight_registers.
LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
} else {
/* TODO: Note: This should not really happen since VBIOS
* should have initialized PWM registers on boot.
*/
REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
}
} else {
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
abm->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&abm->stored_backlight_registers.
LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
/* Have driver take backlight control
* TakeBacklightControl(true)
*/
value = REG_READ(BIOS_SCRATCH_2);
value |= ATOM_S2_VRI_BRIGHT_ENABLE;
REG_WRITE(BIOS_SCRATCH_2, value);
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
/* Disable fractional pwm if configured */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
return true;
}
@ -421,23 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
unsigned int panel_inst,
bool fw_set_brightness)
unsigned int panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
/* If DMCU is in reset state, DMCU is uninitialized */
if (fw_set_brightness)
dmcu_set_backlight_level(abm_dce,
backlight_pwm_u16_16,
frame_ramp,
controller_id,
panel_inst);
else
driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
dmcu_set_backlight_level(abm_dce,
backlight_pwm_u16_16,
frame_ramp,
controller_id,
panel_inst);
return true;
}
@ -445,13 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
.init_backlight = dce_abm_init_backlight,
.set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
.init_abm_config = NULL,
.set_abm_immediate_disable = dce_abm_immediate_disable
.set_abm_immediate_disable = dce_abm_immediate_disable,
};
static void dce_abm_construct(
@ -465,10 +270,6 @@ static void dce_abm_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
base->stored_backlight_registers.BL_PWM_CNTL = 0;
base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;

View File

@ -30,11 +30,6 @@
#include "abm.h"
#define ABM_COMMON_REG_LIST_DCE_BASE() \
SR(BL_PWM_PERIOD_CNTL), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_GRP1_REG_LOCK), \
SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_DATA_REG1)
@ -85,15 +80,6 @@
.field_name = reg_name ## __ ## field_name ## post_fix
#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
@ -178,19 +164,10 @@
type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
type BL_PWM_PERIOD; \
type BL_PWM_PERIOD_BITCNT; \
type BL_ACTIVE_INT_FRAC_CNT; \
type BL_PWM_FRACTIONAL_EN; \
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
type MASTER_COMM_CMD_REG_BYTE2; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
type BL_PWM_GRP1_REG_LOCK; \
type BL_PWM_GRP1_REG_UPDATE_PENDING
type MASTER_COMM_CMD_REG_BYTE2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@ -201,10 +178,6 @@ struct dce_abm_mask {
};
struct dce_abm_registers {
uint32_t BL_PWM_PERIOD_CNTL;
uint32_t BL_PWM_CNTL;
uint32_t BL_PWM_CNTL2;
uint32_t LVTMA_PWRSEQ_REF_DIV;
uint32_t DC_ABM1_HG_SAMPLE_RATE;
uint32_t DC_ABM1_LS_SAMPLE_RATE;
uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@ -219,7 +192,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
uint32_t BL_PWM_GRP1_REG_LOCK;
};
struct dce_abm {

View File

@ -28,6 +28,7 @@
#include "dc_dmub_srv.h"
#include "panel_cntl.h"
#include "dce_panel_cntl.h"
#include "atom.h"
#define TO_DCE_PANEL_CNTL(panel_cntl)\
container_of(panel_cntl, struct dce_panel_cntl, base)
@ -45,9 +46,113 @@
#define FN(reg_name, field_name) \
dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
void dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
{
uint64_t current_backlight;
uint32_t round_result;
uint32_t pwm_period_cntl, bl_period, bl_int_count;
uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
if (bl_int_count == 0)
bl_int_count = 16;
bl_period_mask = (1 << bl_int_count) - 1;
bl_period &= bl_period_mask;
bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
if (fractional_duty_cycle_en == 0)
bl_pwm &= bl_pwm_mask;
else
bl_pwm &= 0xFFFF;
current_backlight = bl_pwm << (1 + bl_int_count);
if (bl_period == 0)
bl_period = 0xFFFF;
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
current_backlight = (uint64_t)(current_backlight) * bl_period;
round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
round_result = (round_result >> (bl_int_count-1)) & 1;
current_backlight >>= bl_int_count;
current_backlight += round_result;
return (uint32_t)(current_backlight);
}
uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
uint32_t current_backlight;
/* It must not be 0, so we have to restore them
* Bios bug w/a - period resets to zero,
* restoring to cache values which is always correct
*/
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
if (value == 0 || value == 1) {
if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
REG_WRITE(BL_PWM_CNTL,
panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
REG_WRITE(BL_PWM_CNTL2,
panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
REG_WRITE(BL_PWM_PERIOD_CNTL,
panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
REG_UPDATE(PWRSEQ_REF_DIV,
BL_PWM_REF_DIV,
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
} else {
/* TODO: Note: This should not really happen since VBIOS
* should have initialized PWM registers on boot.
*/
REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
}
} else {
panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
// Have driver take backlight control
// TakeBacklightControl(true)
value = REG_READ(BIOS_SCRATCH_2);
value |= ATOM_S2_VRI_BRIGHT_ENABLE;
REG_WRITE(BIOS_SCRATCH_2, value);
// Enable the backlight output
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
// Unlock group 2 backlight registers
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
return current_backlight;
}
bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
@ -55,7 +160,7 @@ bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
REG_GET(PWRSEQ_CNTL, BLON, &value);
REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
return value;
}
@ -65,13 +170,94 @@ bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
REG_GET(PWRSEQ_STATE, PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(PWRSEQ_CNTL, DIGON, &dig_on, DIGON_OVRD, &dig_on_ovrd);
REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
void dce_store_backlight_level(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
uint32_t backlight_pwm_u16_16)
{
uint32_t backlight_16bit;
uint32_t masked_pwm_period;
uint8_t bit_count;
uint64_t active_duty_cycle;
uint32_t pwm_period_bitcnt;
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
/*
* 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
* active duty cycle <= backlight period
*/
/* 1.1 Apply bitmask for backlight period value based on value of BITCNT
*/
REG_GET_2(BL_PWM_PERIOD_CNTL,
BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
BL_PWM_PERIOD, &masked_pwm_period);
if (pwm_period_bitcnt == 0)
bit_count = 16;
else
bit_count = pwm_period_bitcnt;
/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
/* 1.2 Calculate integer active duty cycle required upper 16 bits
* contain integer component, lower 16 bits contain fractional component
* of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
*/
active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
/* 1.3 Calculate 16 bit active duty cycle from integer and fractional
* components shift by bitCount then mask 16 bits and add rounding bit
* from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
*/
backlight_16bit = active_duty_cycle >> bit_count;
backlight_16bit &= 0xFFFF;
backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
/*
* 2. Program register with updated value
*/
/* 2.1 Lock group 2 backlight registers */
REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
BL_PWM_GRP1_REG_LOCK, 1);
// 2.2 Write new active duty cycle
REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
/* 2.3 Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
/* 3 Wait for pending bit to be cleared */
REG_WAIT(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
1, 10000);
}
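As a sanity check of the duty-cycle arithmetic, the numbers in the comments above can be reproduced with a tiny standalone program; the inputs 0xEFF0 and 0x24 are the worked example from those comments, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t backlight_pwm_u16_16 = 0xEFF0; /* brightness in u16.16 */
        uint32_t masked_pwm_period = 0x24;      /* 6-bit period value from BL_PWM_PERIOD_CNTL */
        uint8_t bit_count = 6;

        /* Step 1.2: integer * period -> 0x21BDC0 */
        uint64_t active_duty_cycle =
                (uint64_t)backlight_pwm_u16_16 * masked_pwm_period;

        /* Step 1.3: shift, mask, add rounding bit -> 0x86F7 */
        uint32_t backlight_16bit = (uint32_t)(active_duty_cycle >> bit_count) & 0xFFFF;
        backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;

        printf("duty cycle 0x%llX -> 16-bit backlight 0x%X\n",
               (unsigned long long)active_duty_cycle, backlight_16bit);
        return 0;
}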
static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
@ -85,7 +271,8 @@ static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
.hw_init = dce_panel_cntl_hw_init,
.is_panel_backlight_on = dce_is_panel_backlight_on,
.is_panel_powered_on = dce_is_panel_powered_on,
.store_backlight_level = dce_store_backlight_level,
.driver_set_backlight = dce_driver_set_backlight,
};
void dce_panel_cntl_construct(
@ -95,6 +282,13 @@ void dce_panel_cntl_construct(
const struct dce_panel_cntl_shift *shift,
const struct dce_panel_cntl_mask *mask)
{
struct panel_cntl *base = &dce_panel_cntl->base;
base->stored_backlight_registers.BL_PWM_CNTL = 0;
base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
dce_panel_cntl->regs = regs;
dce_panel_cntl->shift = shift;
dce_panel_cntl->mask = mask;

View File

@ -35,10 +35,12 @@
#define DCE_PANEL_CNTL_REG_LIST()\
DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
SR(BL_PWM_GRP1_REG_LOCK)
SR(BL_PWM_GRP1_REG_LOCK), \
SR(BIOS_SCRATCH_2)
#define DCN_PANEL_CNTL_SR(reg_name, block)\
.reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
@ -47,33 +49,37 @@
#define DCN_PANEL_CNTL_REG_LIST()\
DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
SR(BL_PWM_CNTL), \
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
SR(BL_PWM_GRP1_REG_LOCK)
SR(BL_PWM_GRP1_REG_LOCK), \
SR(BIOS_SCRATCH_2)
#define DCE_PANEL_CNTL_SF(block, reg_name, field_name, post_fix)\
.field_name = block ## reg_name ## __ ## block ## field_name ## post_fix
#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, BLON, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, DIGON, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_CNTL, DIGON_OVRD, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_, PWRSEQ_STATE, PWRSEQ_TARGET_STATE_R, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
DCE_PANEL_CNTL_SF(, BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
type BLON;\
type DIGON;\
type DIGON_OVRD;\
type PWRSEQ_TARGET_STATE_R; \
type LVTMA_BLON;\
type LVTMA_DIGON;\
type LVTMA_DIGON_OVRD;\
type LVTMA_PWRSEQ_TARGET_STATE_R; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
type BL_ACTIVE_INT_FRAC_CNT; \
type BL_PWM_FRACTIONAL_EN; \
@ -98,6 +104,8 @@ struct dce_panel_cntl_registers {
uint32_t BL_PWM_CNTL2;
uint32_t BL_PWM_PERIOD_CNTL;
uint32_t BL_PWM_GRP1_REG_LOCK;
uint32_t PWRSEQ_REF_DIV;
uint32_t BIOS_SCRATCH_2;
};
struct dce_panel_cntl {

View File

@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
/* This should not happen; if it does, bail out so we don't get a BSOD */
return;
speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */

View File

@ -27,7 +27,7 @@
#include "dce_abm.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "dmub/inc/dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dm_services.h"
#include "reg_helper.h"
@ -70,53 +70,6 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel
return true;
}
static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *dce_abm)
{
uint64_t current_backlight;
uint32_t round_result;
uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &bl_pwm);
REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
if (bl_int_count == 0)
bl_int_count = 16;
bl_period_mask = (1 << bl_int_count) - 1;
bl_period &= bl_period_mask;
bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
if (fractional_duty_cycle_en == 0)
bl_pwm &= bl_pwm_mask;
else
bl_pwm &= 0xFFFF;
current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
if (bl_period == 0)
bl_period = 0xFFFF;
current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1;
current_backlight = (uint64_t)(current_backlight) * bl_period;
round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
round_result = (round_result >> (bl_int_count-1)) & 1;
current_backlight >>= bl_int_count;
current_backlight += round_result;
return (uint32_t)(current_backlight);
}
static void dmcub_set_backlight_level(
struct dce_abm *dce_abm,
uint32_t backlight_pwm_u16_16,
@ -178,10 +131,9 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
static void dmub_abm_init(struct abm *abm)
static void dmub_abm_init(struct abm *abm, uint32_t backlight)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
unsigned int backlight = calculate_16_bit_backlight_from_pwm(dce_abm);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@ -261,77 +213,8 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
abm->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
return true;
}
static bool dmub_abm_init_backlight(struct abm *abm)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
uint32_t value;
/* It must not be 0, so we have to restore them
* Bios bug w/a - period resets to zero,
* restoring to cache values which is always correct
*/
REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
if (value == 0 || value == 1) {
if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
REG_WRITE(BL_PWM_CNTL,
abm->stored_backlight_registers.BL_PWM_CNTL);
REG_WRITE(BL_PWM_CNTL2,
abm->stored_backlight_registers.BL_PWM_CNTL2);
REG_WRITE(BL_PWM_PERIOD_CNTL,
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
BL_PWM_REF_DIV,
abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
} else {
/* TODO: Note: This should not really happen since VBIOS
* should have initialized PWM registers on boot.
*/
REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
}
} else {
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
abm->stored_backlight_registers.BL_PWM_CNTL2 =
REG_READ(BL_PWM_CNTL2);
abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
&abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
// Have driver take backlight control
// TakeBacklightControl(true)
value = REG_READ(BIOS_SCRATCH_2);
value |= ATOM_S2_VRI_BRIGHT_ENABLE;
REG_WRITE(BIOS_SCRATCH_2, value);
// Enable the backlight output
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
// Unlock group 2 backlight registers
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
return true;
}
@ -340,8 +223,7 @@ static bool dmub_abm_set_backlight_level_pwm(
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int otg_inst,
uint32_t panel_inst,
bool fw_set_brightness)
uint32_t panel_inst)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
@ -384,7 +266,6 @@ static bool dmub_abm_init_config(struct abm *abm,
static const struct abm_funcs abm_funcs = {
.abm_init = dmub_abm_init,
.set_abm_level = dmub_abm_set_level,
.init_backlight = dmub_abm_init_backlight,
.set_pipe = dmub_abm_set_pipe,
.set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
.get_current_backlight = dmub_abm_get_current_backlight,
@ -404,10 +285,6 @@ static void dmub_abm_construct(
base->ctx = ctx;
base->funcs = &abm_funcs;
base->stored_backlight_registers.BL_PWM_CNTL = 0;
base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;

View File

@ -26,8 +26,7 @@
#include "dmub_psr.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "dmub/inc/dmub_srv.h"
#include "dmub/inc/dmub_gpint_cmd.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#define MAX_PIPES 6
@ -94,12 +93,20 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
if (stream->link->psr_settings.psr_version == PSR_VERSION_UNSUPPORTED)
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
return false;
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
cmd.psr_set_version.psr_set_version_data.version = stream->link->psr_settings.psr_version;
switch (stream->link->psr_settings.psr_version) {
case DC_PSR_VERSION_1:
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
break;
case DC_PSR_VERSION_UNSUPPORTED:
default:
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
break;
}
cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);

View File

@ -1066,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, false);
dc_link_set_abm_disable(link);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@ -2355,6 +2355,7 @@ static void init_hw(struct dc *dc)
struct abm *abm;
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@ -2401,12 +2402,17 @@ static void init_hw(struct dc *dc)
audio->funcs->hw_init(audio);
}
abm = dc->res_pool->abm;
if (abm != NULL) {
abm->funcs->init_backlight(abm);
abm->funcs->abm_init(abm);
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->panel_cntl)
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
abm = dc->res_pool->abm;
if (abm != NULL)
abm->funcs->abm_init(abm, backlight);
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@ -2721,6 +2727,7 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
struct dc_link *link = pipe_ctx->stream->link;
struct dc *dc = link->ctx->dc;
struct abm *abm = pipe_ctx->stream_res.abm;
struct panel_cntl *panel_cntl = link->panel_cntl;
struct dmcu *dmcu = dc->res_pool->dmcu;
bool fw_set_brightness = true;
/* DMCU -1 for all controller id values,
@ -2728,23 +2735,38 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
*/
uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
if (abm == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
return false;
if (dmcu)
fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
abm->funcs->set_backlight_level_pwm(
abm,
backlight_pwm_u16_16,
frame_ramp,
controller_id,
link->panel_cntl->inst,
fw_set_brightness);
if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
else
abm->funcs->set_backlight_level_pwm(
abm,
backlight_pwm_u16_16,
frame_ramp,
controller_id,
link->panel_cntl->inst);
return true;
}
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
if (abm)
abm->funcs->set_abm_immediate_disable(abm,
pipe_ctx->stream->link->panel_cntl->inst);
if (panel_cntl)
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@ -2781,6 +2803,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_cursor_position = dce110_set_cursor_position,
.set_cursor_attribute = dce110_set_cursor_attribute,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dce110_private_funcs = {

View File

@ -88,6 +88,7 @@ void dce110_edp_wait_for_hpd_ready(
bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */

View File

@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
uint32_t pixel_width;
if (scl_data->viewport.width > scl_data->recout.width)
pixel_width = scl_data->recout.width;
else
pixel_width = scl_data->viewport.width;
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&

View File

@ -826,6 +826,14 @@ enum dc_status dcn10_enable_stream_timing(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
/*
* The way 420 is packed, two channels carry the Y component and one channel
* alternates between Cb and Cr, so both channels need the pixel
* value for Y
*/
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
black_color.color_r_cr = black_color.color_g_y;
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
@ -903,8 +911,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm,
pipe_ctx->stream->link->panel_cntl->inst);
dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@ -1245,6 +1252,7 @@ void dcn10_init_hw(struct dc *dc)
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@ -1411,11 +1419,16 @@ void dcn10_init_hw(struct dc *dc)
audio->funcs->hw_init(audio);
}
if (abm != NULL) {
abm->funcs->init_backlight(abm);
abm->funcs->abm_init(abm);
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
if (link->panel_cntl)
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
if (abm != NULL)
abm->funcs->abm_init(abm, backlight);
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@ -2249,6 +2262,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
/*
* The way 420 is packed, two channels carry the Y component and one channel
* alternates between Cb and Cr, so both channels need the pixel
* value for Y
*/
if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@ -2490,9 +2511,7 @@ void dcn10_blank_pixel_data(
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm,
stream->link->panel_cntl->inst);
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}

View File

@ -73,6 +73,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {

View File

@ -1121,24 +1121,6 @@ static enum dc_status build_mapped_resource(
{
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
/*TODO Seems unneeded anymore */
/* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
if (stream != NULL && old_context->streams[i] != NULL) {
todo: shouldn't have to copy missing parameter here
resource_build_bit_depth_reduction_params(stream,
&stream->bit_depth_params);
stream->clamping.pixel_encoding =
stream->timing.pixel_encoding;
resource_build_bit_depth_reduction_params(stream,
&stream->bit_depth_params);
build_clamping_params(stream);
continue;
}
}
*/
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;

View File

@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
/* This should not happen; if it does, bail out so we don't get a BSOD */
return;
speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */

View File

@ -961,9 +961,7 @@ void dcn20_blank_pixel_data(
width = width / odm_cnt;
if (blank) {
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm,
stream->link->panel_cntl->inst);
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@ -2042,9 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
*/
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm,
pipe_ctx->stream->link->panel_cntl->inst);
dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

View File

@ -84,6 +84,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {

View File

@ -1653,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
/*TODO Seems unneeded anymore */
/* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
if (stream != NULL && old_context->streams[i] != NULL) {
todo: shouldn't have to copy missing parameter here
resource_build_bit_depth_reduction_params(stream,
&stream->bit_depth_params);
stream->clamping.pixel_encoding =
stream->timing.pixel_encoding;
resource_build_bit_depth_reduction_params(stream,
&stream->bit_depth_params);
build_clamping_params(stream);
continue;
}
}
*/
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@ -1940,7 +1922,7 @@ bool dcn20_split_stream_for_odm(
return true;
}
bool dcn20_split_stream_for_mpc(
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@ -1969,11 +1951,6 @@ bool dcn20_split_stream_for_mpc(
secondary_pipe->top_pipe = primary_pipe;
ASSERT(primary_pipe->plane_state);
if (!resource_build_scaling_params(primary_pipe) ||
!resource_build_scaling_params(secondary_pipe))
return false;
return true;
}
void dcn20_populate_dml_writeback_from_context(
@ -2599,11 +2576,32 @@ static void dcn20_merge_pipes_for_validate(
}
}
int dcn20_find_previous_split_count(struct pipe_ctx *pipe)
{
int previous_split = 1;
struct pipe_ctx *current_pipe = pipe;
while (current_pipe->bottom_pipe) {
if (current_pipe->plane_state != current_pipe->bottom_pipe->plane_state)
break;
previous_split++;
current_pipe = current_pipe->bottom_pipe;
}
current_pipe = pipe;
while (current_pipe->top_pipe) {
if (current_pipe->plane_state != current_pipe->top_pipe->plane_state)
break;
previous_split++;
current_pipe = current_pipe->top_pipe;
}
return previous_split;
}
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
bool *split,
int *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
@ -2658,8 +2656,14 @@ int dcn20_validate_apply_pipe_split_flags(
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if (force_split
|| context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1) {
if (context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4)
split[i] = 4;
else
split[i] = 2;
}
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
@ -2668,9 +2672,9 @@ int dcn20_validate_apply_pipe_split_flags(
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))
split[i] = true;
split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
split[i] = 2;
context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
@ -2678,39 +2682,58 @@ int dcn20_validate_apply_pipe_split_flags(
if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
/*Already split odm pipe tree, don't try to split again*/
split[i] = false;
split[pipe->prev_odm_pipe->pipe_idx] = false;
split[i] = 0;
split[pipe->prev_odm_pipe->pipe_idx] = 0;
} else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
/*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
split[i] = false;
split[pipe->top_pipe->pipe_idx] = false;
} else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
if (split[i] == false) {
/*If 2 way split but can support 4 way split, then split each pipe again*/
if (context->stream_count == 1 && plane_count == 1
&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4) {
split[i] = 2;
} else {
split[i] = 0;
split[pipe->top_pipe->pipe_idx] = 0;
}
} else if (pipe->prev_odm_pipe || (dcn20_find_previous_split_count(pipe) == 2 && pipe->top_pipe)) {
if (split[i] == 0) {
/*Exiting mpc/odm combine*/
merge[i] = true;
if (pipe->prev_odm_pipe) {
ASSERT(0); /*should not actually happen yet*/
merge[pipe->prev_odm_pipe->pipe_idx] = true;
} else
merge[pipe->top_pipe->pipe_idx] = true;
} else {
/*Transition from mpc combine to odm combine or vice versa*/
ASSERT(0); /*should not actually happen yet*/
split[i] = true;
split[i] = 2;
merge[i] = true;
if (pipe->prev_odm_pipe) {
split[pipe->prev_odm_pipe->pipe_idx] = true;
split[pipe->prev_odm_pipe->pipe_idx] = 2;
merge[pipe->prev_odm_pipe->pipe_idx] = true;
} else {
split[pipe->top_pipe->pipe_idx] = true;
split[pipe->top_pipe->pipe_idx] = 2;
merge[pipe->top_pipe->pipe_idx] = true;
}
}
} else if (dcn20_find_previous_split_count(pipe) == 3) {
if (split[i] == 0 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
} else if (split[i] == 2 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
split[i] = 0;
}
} else if (dcn20_find_previous_split_count(pipe) == 4) {
if (split[i] == 0 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
} else if (split[i] == 2 && !pipe->top_pipe) {
merge[pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
merge[pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx] = true;
split[i] = 0;
}
}
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
if (split[i] != 0
&& context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
pipe_idx++;
}
@ -2727,7 +2750,7 @@ bool dcn20_fast_validate_bw(
int *vlevel_out)
{
bool out = false;
bool split[MAX_PIPES] = { false };
int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@ -2787,7 +2810,7 @@ bool dcn20_fast_validate_bw(
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
if (split[i]) {
if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@ -2802,11 +2825,13 @@ bool dcn20_fast_validate_bw(
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
} else
if (!dcn20_split_stream_for_mpc(
} else {
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe))
pipe, hsplit_pipe);
if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
goto validate_fail;
}
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@ -3107,25 +3132,34 @@ validate_out:
return out;
}
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
/*
 * This must be noinline to ensure that anything dealing with FP registers
 * is contained within this call; previously, compiling with hard-float
 * would result in FP instructions being emitted outside the boundaries of
 * the DC_FP_START/END macros, which makes sense since the compiler has no
 * idea what is wrapped and what is not.
 *
 * This is largely just a workaround to avoid breakage introduced with 5.6;
 * ideally all FP-using code should be moved into its own file, only that
 * file should be compiled with hard-float, and all code exported from there
 * should be strictly wrapped with DC_FP_START/END.
 */
static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
struct dc_state *context, bool fast_validate)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
double p_state_latency_us;
DC_FP_START();
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
dc->debug.enable_dram_clock_change_one_display_vactive;
if (fast_validate) {
voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
DC_FP_END();
return voltage_supported;
return dcn20_validate_bandwidth_internal(dc, context, true);
}
// Best case, we support full UCLK switch latency
@ -3154,7 +3188,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
}
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported = false;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
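As a minimal sketch of the pattern described in the comment above (hypothetical helper names, assuming only the DC_FP_START/DC_FP_END macros already used in this file): all double-precision math lives in a noinline helper that takes and returns integer types, and its sole caller brackets the call so no FP instruction can escape the protected region.

static noinline bool margin_is_positive_fp(int latency_us, int budget_us)
{
	/* FP-only work stays inside this out-of-line helper */
	double margin = (double)budget_us - (double)latency_us;

	return margin > 0.0;
}

static bool margin_is_positive(int latency_us, int budget_us)
{
	bool positive;

	DC_FP_START();	/* enter the protected kernel-FP region */
	positive = margin_is_positive_fp(latency_us, budget_us);
	DC_FP_END();	/* leave the protected region */

	return positive;
}

The change above follows the same split: dcn20_validate_bandwidth() is the only exported entry point, and dcn20_validate_bandwidth_fp() keeps every FP access between the two markers.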
@ -3213,8 +3255,6 @@ static struct dc_cap_funcs cap_funcs = {
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
@ -3226,7 +3266,7 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
return result;
return DC_OK;
}
static struct resource_funcs dcn20_res_pool_funcs = {

View File

@ -119,17 +119,18 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
int dcn20_find_previous_split_count(struct pipe_ctx *pipe);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
bool *split,
int *split,
bool *merge);
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
bool dcn20_split_stream_for_mpc(
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,

View File

@ -86,13 +86,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {

View File

@ -1384,7 +1384,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
unsigned int i, j, closest_clk_lvl;
unsigned int i, closest_clk_lvl;
int j;
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {

View File

@ -2599,18 +2599,40 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
}
{
float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
int PlaneWithMinActiveDRAMClockChangeMargin = -1;
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (mode_lib->vba.BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
if (mode_lib->vba.BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
}
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
&& !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
}
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
@ -2629,7 +2651,11 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
if ((mode_lib->vba.SynchronizedVBlank
|| mode_lib->vba.NumberOfActivePlanes == 1
|| (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
&& mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@ -2641,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
}
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];

View File

@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double HostVMInefficiencyFactor;
double VRatioClamped;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor =
@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(
dpte_row_height * LineTime / VRatio,
dpte_row_height_chroma * LineTime / (VRatio / 2));
dpte_row_height * LineTime / VRatioClamped,
dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(
meta_row_height * LineTime / VRatio,
meta_row_height_chroma * LineTime / (VRatio / 2));
meta_row_height * LineTime / VRatioClamped,
meta_row_height_chroma * LineTime / (VRatioClamped / 2));
} else {
min_row_time = dml_min4(
dpte_row_height * LineTime / VRatio,
meta_row_height * LineTime / VRatio,
dpte_row_height_chroma * LineTime / (VRatio / 2),
meta_row_height_chroma * LineTime / (VRatio / 2));
dpte_row_height * LineTime / VRatioClamped,
meta_row_height * LineTime / VRatioClamped,
dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
meta_row_height_chroma * LineTime / (VRatioClamped / 2));
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatio;
min_row_time = dpte_row_height * LineTime / VRatioClamped;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatio;
min_row_time = meta_row_height * LineTime / VRatioClamped;
} else {
min_row_time = dml_min(
dpte_row_height * LineTime / VRatio,
meta_row_height * LineTime / VRatio);
dpte_row_height * LineTime / VRatioClamped,
meta_row_height * LineTime / VRatioClamped);
}
}

View File

@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
if (htaps_l <= 1)
if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
if (htaps_c <= 1)
if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
@ -1533,8 +1533,8 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))

View File

@ -118,6 +118,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
bool disable_dram_clock_change_vactive_support;
bool allow_dram_clock_one_display_vactive;
};
struct _vcs_dpi_ip_params_st {

View File

@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!

View File

@ -899,6 +899,7 @@ struct vba_vars_st {
double BPP;
enum odm_combine_policy ODMCombinePolicy;
bool UseMinimumRequiredDCFCLK;
bool AllowDramClockChangeOneDisplayVactive;
};
bool CalculateMinAndMaxPrefetchMode(

View File

@ -27,27 +27,17 @@
#include "dm_services_types.h"
struct abm_backlight_registers {
unsigned int BL_PWM_CNTL;
unsigned int BL_PWM_CNTL2;
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
};
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
bool dmcu_is_running;
/* registers setting needs to be saved and restored at InitBacklight */
struct abm_backlight_registers stored_backlight_registers;
};
struct abm_funcs {
void (*abm_init)(struct abm *abm);
void (*abm_init)(struct abm *abm, uint32_t back_light);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
bool (*init_backlight)(struct abm *abm);
/* backlight_pwm_u16_16 is unsigned 32 bit,
* 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@ -56,8 +46,7 @@ struct abm_funcs {
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
unsigned int panel_inst,
bool fw_set_brightness);
unsigned int panel_inst);
unsigned int (*get_current_backlight)(struct abm *abm);
unsigned int (*get_target_backlight)(struct abm *abm);

View File

@ -32,11 +32,23 @@
#include "dc_types.h"
#define MAX_BACKLIGHT_LEVEL 0xFFFF
struct panel_cntl_backlight_registers {
unsigned int BL_PWM_CNTL;
unsigned int BL_PWM_CNTL2;
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
};
struct panel_cntl_funcs {
void (*destroy)(struct panel_cntl **panel_cntl);
void (*hw_init)(struct panel_cntl *panel_cntl);
uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
void (*store_backlight_level)(struct panel_cntl *panel_cntl);
void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
uint32_t backlight_pwm_u16_16);
};
struct panel_cntl_init_data {
@ -48,6 +60,8 @@ struct panel_cntl {
const struct panel_cntl_funcs *funcs;
struct dc_context *ctx;
uint32_t inst;
/* registers setting needs to be saved and restored at InitBacklight */
struct panel_cntl_backlight_registers stored_backlight_registers;
};
#endif /* DC_PANEL_CNTL_H_ */

View File

@ -196,6 +196,8 @@ struct hw_sequencer_funcs {
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
};

View File

@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream);
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,

View File

@ -108,7 +108,7 @@
#define ASSERT(expr) ASSERT_CRITICAL(expr)
#else
#define ASSERT(expr) WARN_ON(!(expr))
#define ASSERT(expr) WARN_ON_ONCE(!(expr))
#endif
#define BREAK_TO_DEBUGGER() ASSERT(0)

View File

@ -64,10 +64,11 @@
* other component within DAL.
*/
#include "dmub_types.h"
#include "dmub_cmd.h"
#include "dmub_gpint_cmd.h"
#include "dmub_rb.h"
#include "inc/dmub_types.h"
#include "inc/dmub_cmd.h"
#include "inc/dmub_gpint_cmd.h"
#include "inc/dmub_cmd_dal.h"
#include "inc/dmub_rb.h"
#if defined(__cplusplus)
extern "C" {

View File

@ -262,6 +262,7 @@ struct dmub_cmd_abm_set_pipe_data {
uint32_t ramping_boundary;
uint32_t otg_inst;
uint32_t panel_inst;
uint32_t set_pipe_option;
};
struct dmub_rb_cmd_abm_set_pipe {

View File

@ -23,7 +23,7 @@
*
*/
#include "../inc/dmub_srv.h"
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"

View File

@ -23,7 +23,7 @@
*
*/
#include "../inc/dmub_srv.h"
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn21.h"

View File

@ -24,7 +24,7 @@
*/
#include "dmub_reg.h"
#include "../inc/dmub_srv.h"
#include "../dmub_srv.h"
struct dmub_reg_value_masks {
uint32_t value;

View File

@ -23,7 +23,7 @@
*
*/
#include "../inc/dmub_srv.h"
#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_fw_meta.h"

View File

@ -29,7 +29,6 @@
#include "mod_shared.h"
#include "mod_freesync.h"
#include "dc.h"
#include "dmub/inc/dmub_cmd_dal.h"
enum vsc_packet_revision {
vsc_packet_undefined = 0,
@ -145,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/*VSC packet set to 2 when DP revision >= 1.2*/
if (stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED)
if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */

View File

@ -1,448 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "mod_stats.h"
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100
#define MOD_STATS_NUM_VSYNCS 5
#define MOD_STATS_EVENT_STRING_MAX 512
struct stats_time_cache {
unsigned int entry_id;
unsigned long flip_timestamp_in_ns;
unsigned long vupdate_timestamp_in_ns;
unsigned int render_time_in_us;
unsigned int avg_render_time_in_us_last_ten;
unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
unsigned int num_vsync_between_flips;
unsigned int flip_to_vsync_time_in_us;
unsigned int vsync_to_flip_time_in_us;
unsigned int min_window;
unsigned int max_window;
unsigned int v_total_min;
unsigned int v_total_max;
unsigned int event_triggers;
unsigned int lfc_mid_point_in_us;
unsigned int num_frames_inserted;
unsigned int inserted_duration_in_us;
unsigned int flags;
};
struct stats_event_cache {
unsigned int entry_id;
char event_string[MOD_STATS_EVENT_STRING_MAX];
};
struct core_stats {
struct mod_stats public;
struct dc *dc;
bool enabled;
unsigned int entries;
unsigned int event_entries;
unsigned int entry_id;
struct stats_time_cache *time;
unsigned int index;
struct stats_event_cache *events;
unsigned int event_index;
};
#define MOD_STATS_TO_CORE(mod_stats)\
container_of(mod_stats, struct core_stats, public)
bool mod_stats_init(struct mod_stats *mod_stats)
{
bool result = false;
struct core_stats *core_stats = NULL;
struct dc *dc = NULL;
if (mod_stats == NULL)
return false;
core_stats = MOD_STATS_TO_CORE(mod_stats);
dc = core_stats->dc;
return result;
}
struct mod_stats *mod_stats_create(struct dc *dc)
{
struct core_stats *core_stats = NULL;
struct persistent_data_flag flag;
unsigned int reg_data;
int i = 0;
if (dc == NULL)
goto fail_construct;
core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
if (core_stats == NULL)
goto fail_construct;
core_stats->dc = dc;
core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
if (dm_read_persistent_data(dc->ctx, NULL, NULL,
DAL_STATS_ENABLE_REGKEY,
&reg_data, sizeof(unsigned int), &flag))
core_stats->enabled = reg_data;
if (core_stats->enabled) {
core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
if (dm_read_persistent_data(dc->ctx, NULL, NULL,
DAL_STATS_ENTRIES_REGKEY,
&reg_data, sizeof(unsigned int), &flag)) {
if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
else
core_stats->entries = reg_data;
}
core_stats->time = kcalloc(core_stats->entries,
sizeof(struct stats_time_cache),
GFP_KERNEL);
if (core_stats->time == NULL)
goto fail_construct_time;
core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
core_stats->events = kcalloc(core_stats->event_entries,
sizeof(struct stats_event_cache),
GFP_KERNEL);
if (core_stats->events == NULL)
goto fail_construct_events;
} else {
core_stats->entries = 0;
}
/* Purposely leave index 0 unused so we don't need special logic to
* handle calculation cases that depend on previous flip data.
*/
core_stats->index = 1;
core_stats->event_index = 0;
// Keeps track of ordering within the different stats structures
core_stats->entry_id = 0;
return &core_stats->public;
fail_construct_events:
kfree(core_stats->time);
fail_construct_time:
kfree(core_stats);
fail_construct:
return NULL;
}
void mod_stats_destroy(struct mod_stats *mod_stats)
{
if (mod_stats != NULL) {
struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
kfree(core_stats->time);
kfree(core_stats->events);
kfree(core_stats);
}
}
void mod_stats_dump(struct mod_stats *mod_stats)
{
struct dc *dc = NULL;
struct dal_logger *logger = NULL;
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
struct stats_event_cache *events = NULL;
unsigned int time_index = 1;
unsigned int event_index = 0;
unsigned int index = 0;
struct log_entry log_entry;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
dc = core_stats->dc;
logger = dc->ctx->logger;
time = core_stats->time;
events = core_stats->events;
DISPLAY_STATS_BEGIN(log_entry);
DISPLAY_STATS("==Display Caps==\n");
DISPLAY_STATS("==Display Stats==\n");
DISPLAY_STATS("%10s %10s %10s %10s %10s"
" %11s %11s %17s %10s %14s"
" %10s %10s %10s %10s %10s"
" %10s %10s %10s %10s\n",
"render", "avgRender",
"minWindow", "midPoint", "maxWindow",
"vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
"numFrame", "insertDuration",
"vTotalMin", "vTotalMax", "eventTrigs",
"vSyncTime1", "vSyncTime2", "vSyncTime3",
"vSyncTime4", "vSyncTime5", "flags");
for (int i = 0; i < core_stats->entry_id; i++) {
if (event_index < core_stats->event_index &&
i == events[event_index].entry_id) {
DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
event_index++;
} else if (time_index < core_stats->index &&
i == time[time_index].entry_id) {
DISPLAY_STATS("%10u %10u %10u %10u %10u"
" %11u %11u %17u %10u %14u"
" %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u\n",
time[time_index].render_time_in_us,
time[time_index].avg_render_time_in_us_last_ten,
time[time_index].min_window,
time[time_index].lfc_mid_point_in_us,
time[time_index].max_window,
time[time_index].vsync_to_flip_time_in_us,
time[time_index].flip_to_vsync_time_in_us,
time[time_index].num_vsync_between_flips,
time[time_index].num_frames_inserted,
time[time_index].inserted_duration_in_us,
time[time_index].v_total_min,
time[time_index].v_total_max,
time[time_index].event_triggers,
time[time_index].v_sync_time_in_us[0],
time[time_index].v_sync_time_in_us[1],
time[time_index].v_sync_time_in_us[2],
time[time_index].v_sync_time_in_us[3],
time[time_index].v_sync_time_in_us[4],
time[time_index].flags);
time_index++;
}
}
DISPLAY_STATS_END(log_entry);
}
void mod_stats_reset_data(struct mod_stats *mod_stats)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
memset(core_stats->time, 0,
sizeof(struct stats_time_cache) * core_stats->entries);
memset(core_stats->events, 0,
sizeof(struct stats_event_cache) * core_stats->event_entries);
core_stats->index = 1;
core_stats->event_index = 0;
// Keeps track of ordering within the different stats structures
core_stats->entry_id = 0;
}
void mod_stats_update_event(struct mod_stats *mod_stats,
char *event_string,
unsigned int length)
{
struct core_stats *core_stats = NULL;
struct stats_event_cache *events = NULL;
unsigned int index = 0;
unsigned int copy_length = 0;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
if (core_stats->event_index >= core_stats->event_entries)
return;
events = core_stats->events;
index = core_stats->event_index;
copy_length = length;
if (length > MOD_STATS_EVENT_STRING_MAX)
copy_length = MOD_STATS_EVENT_STRING_MAX;
memcpy(&events[index].event_string, event_string, copy_length);
events[index].event_string[copy_length - 1] = '\0';
events[index].entry_id = core_stats->entry_id;
core_stats->event_index++;
core_stats->entry_id++;
}
void mod_stats_update_flip(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
if (core_stats->index >= core_stats->entries)
return;
time = core_stats->time;
index = core_stats->index;
time[index].flip_timestamp_in_ns = timestamp_in_ns;
time[index].render_time_in_us =
(timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
if (index >= 10) {
for (unsigned int i = 0; i < 10; i++)
time[index].avg_render_time_in_us_last_ten +=
time[index - i].render_time_in_us;
time[index].avg_render_time_in_us_last_ten /= 10;
}
if (time[index].num_vsync_between_flips > 0)
time[index].vsync_to_flip_time_in_us =
(timestamp_in_ns -
time[index].vupdate_timestamp_in_ns) / 1000;
else
time[index].vsync_to_flip_time_in_us =
(timestamp_in_ns -
time[index - 1].vupdate_timestamp_in_ns) / 1000;
time[index].entry_id = core_stats->entry_id;
core_stats->index++;
core_stats->entry_id++;
}
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;
unsigned int num_vsyncs = 0;
unsigned int prev_vsync_in_ns = 0;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
if (core_stats->index >= core_stats->entries)
return;
time = core_stats->time;
index = core_stats->index;
num_vsyncs = time[index].num_vsync_between_flips;
if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
if (num_vsyncs == 0) {
prev_vsync_in_ns =
time[index - 1].vupdate_timestamp_in_ns;
time[index].flip_to_vsync_time_in_us =
(timestamp_in_ns -
time[index - 1].flip_timestamp_in_ns) /
1000;
} else {
prev_vsync_in_ns =
time[index].vupdate_timestamp_in_ns;
}
time[index].v_sync_time_in_us[num_vsyncs] =
(timestamp_in_ns - prev_vsync_in_ns) / 1000;
}
time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
time[index].num_vsync_between_flips++;
}
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
unsigned int v_total_max,
unsigned int event_triggers,
unsigned int window_min,
unsigned int window_max,
unsigned int lfc_mid_point_in_us,
unsigned int inserted_frames,
unsigned int inserted_duration_in_us)
{
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;
if (mod_stats == NULL)
return;
core_stats = MOD_STATS_TO_CORE(mod_stats);
if (core_stats->index >= core_stats->entries)
return;
time = core_stats->time;
index = core_stats->index;
time[index].v_total_min = v_total_min;
time[index].v_total_max = v_total_max;
time[index].event_triggers = event_triggers;
time[index].min_window = window_min;
time[index].max_window = window_max;
time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
time[index].num_frames_inserted = inserted_frames;
time[index].inserted_duration_in_us = inserted_duration_in_us;
}

View File

@ -5599,6 +5599,7 @@
#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
//GRBM_STATUS
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
@ -5619,6 +5620,7 @@
#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
@ -5832,6 +5834,7 @@
#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
//GRBM_READ_ERROR2
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
@ -5847,6 +5850,7 @@
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L

View File

@ -972,11 +972,13 @@ struct atom_ext_display_path
};
//usCaps
enum ext_display_path_cap_def
{
EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =0x0001,
EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =0x0002,
EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =0x007C,
enum ext_display_path_cap_def {
EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI converter chip
};
struct atom_external_display_connection_info

View File

@ -322,12 +322,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/

View File

@ -1498,7 +1498,7 @@ static int smu_disable_dpm(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((adev->in_gpu_reset &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
(adev->in_runpm && amdgpu_asic_supports_baco(adev)));
((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
ret = smu_get_smc_version(smu, NULL, &smu_version);
if (ret) {
@ -1784,12 +1784,12 @@ static int smu_enable_umd_pstate(void *handle,
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
smu_dpm_ctx->enable_umd_pstate = true;
amdgpu_device_ip_set_clockgating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
amdgpu_device_ip_set_clockgating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/

View File

@ -2251,7 +2251,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
if (!smu_v11_0_baco_is_support(smu))
if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);

View File

@ -1357,7 +1357,6 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
.asic_setup = NULL,
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,

View File

@ -1293,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size < 0)
return -EINVAL;
ret = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,

View File

@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
SDEBUG("<<\n");
free:
if (ws)
kfree(ectx.ws);
kfree(ectx.ws);
return ret;
}

View File

@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
unsigned long irqflags;
int r;
if (pipe < 0 || pipe >= rdev->num_crtc) {
if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
if (pipe < 0 || pipe >= rdev->num_crtc) {
if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}

View File

@ -251,7 +251,7 @@ struct kfd_memory_exception_failure {
__u32 imprecise; /* Can't determine the exact fault address */
};
/* memory exception data*/
/* memory exception data */
struct kfd_hsa_memory_exception_data {
struct kfd_memory_exception_failure failure;
__u64 va;