Merge tag 'amd-drm-next-5.6-2020-01-09' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.6-2020-01-09:

amdgpu:
- Enable DCN support on POWER
- Enable GFXOFF for Raven1 refresh
- Clean up MM engine idle handlers
- HDMI 2.0 audio fixes
- Fixes for some 10 bpc EDP panels
- Watermark fixes for renoir
- SR-IOV fixes
- Runtime pm robustness fixes
- Arcturus VCN fixes
- RAS fixes
- BACO fixes for Arcturus
- Stable pstate fixes for swSMU
- HDCP fixes
- PSP cleanup
- HDMI fixes
- Misc cleanups

amdkfd:
- Spread interrupt work across cores to reduce latency
- Topology fixes for APUs
- GPU reset improvements

UAPI:
- Enable DRIVER_SYNCOBJ_TIMELINE for vulkan
- Return better error values for kfd process ioctl

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200109230338.8022-1-alexander.deucher@amd.com
commit d5d88cd6ee
@@ -636,9 +636,8 @@ struct amdgpu_fw_vram_usage {
 struct amdgpu_bo *reserved_bo;
 void *va;
 
-/* Offset on the top of VRAM, used as c2p write buffer.
+/* GDDR6 training support flag.
 */
-u64 mem_train_fb_loc;
 bool mem_train_support;
 };
 
@@ -994,8 +993,6 @@ struct amdgpu_device {
 
 bool pm_sysfs_en;
 bool ucode_sysfs_en;
-
-bool in_baco;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -46,6 +46,8 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
 
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do { \
@@ -258,6 +260,22 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 return 0;
 }
 
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+uint64_t page_table_base)
+{
+struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+pr_err("trying to set page table base for wrong VMID %u\n",
+vmid);
+return;
+}
+
+mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
 .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
 .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -277,7 +295,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 .get_atc_vmid_pasid_mapping_info =
 kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 .get_tile_config = kgd_gfx_v9_get_tile_config,
-.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
 .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
 .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
 .get_hive_id = amdgpu_amdkfd_get_hive_id,
@@ -40,7 +40,6 @@
 #include "soc15d.h"
 #include "mmhub_v1_0.h"
 #include "gfxhub_v1_0.h"
-#include "mmhub_v9_4.h"
 
 
 enum hqd_dequeue_request_type {
@@ -758,8 +757,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 return 0;
 }
 
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+uint32_t vmid, uint64_t page_table_base)
 {
 struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
@@ -769,13 +768,6 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
 return;
 }
 
-/* TODO: take advantage of per-process address space size. For
-* now, all processes share the same address space size, like
-* on GFX8 and older.
-*/
-if (adev->asic_type == CHIP_ARCTURUS) {
-mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-} else
 mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
 gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
@@ -57,8 +57,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-uint64_t page_table_base);
 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
@@ -2022,7 +2022,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
 if (adev->is_atom_fw) {
 amdgpu_atomfirmware_scratch_regs_init(adev);
 amdgpu_atomfirmware_allocate_fb_scratch(adev);
-ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ret = amdgpu_atomfirmware_get_mem_train_info(adev);
 if (ret) {
 DRM_ERROR("Failed to get mem train fb location.\n");
 return ret;
@@ -525,16 +525,12 @@ static int gddr6_mem_train_support(struct amdgpu_device *adev)
 return ret;
 }
 
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
 {
 struct atom_context *ctx = adev->mode_info.atom_context;
-unsigned char *bios = ctx->bios;
-struct vram_reserve_block *reserved_block;
-int index, block_number;
+int index;
 uint8_t frev, crev;
 uint16_t data_offset, size;
-uint32_t start_address_in_kb;
-uint64_t offset;
 int ret;
 
 adev->fw_vram_usage.mem_train_support = false;
@@ -569,32 +565,6 @@ int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
 return -EINVAL;
 }
 
-reserved_block = (struct vram_reserve_block *)
-(bios + data_offset + sizeof(struct atom_common_table_header));
-block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
-/ sizeof(struct vram_reserve_block);
-reserved_block += (block_number > 0) ? block_number-1 : 0;
-DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
-block_number,
-le32_to_cpu(reserved_block->start_address_in_kb),
-le16_to_cpu(reserved_block->used_by_firmware_in_kb),
-le16_to_cpu(reserved_block->used_by_driver_in_kb));
-if (reserved_block->used_by_firmware_in_kb > 0) {
-start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
-offset = (uint64_t)start_address_in_kb * ONE_KiB;
-if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
-offset -= ONE_MiB;
-}
-
-offset &= ~(ONE_MiB - 1);
-adev->fw_vram_usage.mem_train_fb_loc = offset;
 adev->fw_vram_usage.mem_train_support = true;
-DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
-ret = 0;
-} else {
-DRM_ERROR("used_by_firmware_in_kb is 0!\n");
-ret = -EINVAL;
-}
-
-return ret;
+return 0;
 }
@@ -31,7 +31,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 int *vram_width, int *vram_type, int *vram_vendor);
-int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
 bool d3_supported = false;
 struct pci_dev *parent_pdev;
 
-while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+vga_count++;
+
+has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+parent_pdev = pci_upstream_bridge(pdev);
+d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+amdgpu_atpx_get_quirks(pdev);
+}
+
+while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
 vga_count++;
 
 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
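Editorial note on the PCI class values in the hunk above (a hedged aside, not part of the patch): in include/linux/pci_ids.h, PCI_BASE_CLASS_DISPLAY is the 8-bit base class 0x03, so the old (0x03 << 16) key matched every display-class device, while PCI_CLASS_DISPLAY_VGA (0x0300) and PCI_CLASS_DISPLAY_OTHER (0x0380) are full 16-bit class codes shifted by 8, so each new loop matches a single sub-class. A minimal sketch of the iteration pattern, with the probe body elided:

	struct pci_dev *pdev = NULL;

	/* pci_get_class() drops the reference on the previous device and
	 * takes one on the next match, so no manual pci_dev_put() is needed
	 * inside the loop. */
	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		/* probe pdev here, e.g. look for an ATPX handle */
	}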
@@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 struct amdgpu_ctx *ctx)
 {
 unsigned num_entities = amdgpu_ctx_total_num_entities();
-unsigned i, j, k;
+unsigned i, j;
 int r;
 
 if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -121,72 +121,57 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
-unsigned num_rings = 0;
-unsigned num_rqs = 0;
+struct drm_gpu_scheduler **scheds;
+struct drm_gpu_scheduler *sched;
+unsigned num_scheds = 0;
 
 switch (i) {
 case AMDGPU_HW_IP_GFX:
-rings[0] = &adev->gfx.gfx_ring[0];
-num_rings = 1;
+sched = &adev->gfx.gfx_ring[0].sched;
+scheds = &sched;
+num_scheds = 1;
 break;
 case AMDGPU_HW_IP_COMPUTE:
-for (j = 0; j < adev->gfx.num_compute_rings; ++j)
-rings[j] = &adev->gfx.compute_ring[j];
-num_rings = adev->gfx.num_compute_rings;
+scheds = adev->gfx.compute_sched;
+num_scheds = adev->gfx.num_compute_sched;
 break;
 case AMDGPU_HW_IP_DMA:
-for (j = 0; j < adev->sdma.num_instances; ++j)
-rings[j] = &adev->sdma.instance[j].ring;
-num_rings = adev->sdma.num_instances;
+scheds = adev->sdma.sdma_sched;
+num_scheds = adev->sdma.num_sdma_sched;
 break;
 case AMDGPU_HW_IP_UVD:
-rings[0] = &adev->uvd.inst[0].ring;
-num_rings = 1;
+sched = &adev->uvd.inst[0].ring.sched;
+scheds = &sched;
+num_scheds = 1;
 break;
 case AMDGPU_HW_IP_VCE:
-rings[0] = &adev->vce.ring[0];
-num_rings = 1;
+sched = &adev->vce.ring[0].sched;
+scheds = &sched;
+num_scheds = 1;
 break;
 case AMDGPU_HW_IP_UVD_ENC:
-rings[0] = &adev->uvd.inst[0].ring_enc[0];
-num_rings = 1;
+sched = &adev->uvd.inst[0].ring_enc[0].sched;
+scheds = &sched;
+num_scheds = 1;
 break;
 case AMDGPU_HW_IP_VCN_DEC:
-for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-if (adev->vcn.harvest_config & (1 << j))
-continue;
-rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
-}
+scheds = adev->vcn.vcn_dec_sched;
+num_scheds = adev->vcn.num_vcn_dec_sched;
 break;
 case AMDGPU_HW_IP_VCN_ENC:
-for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-if (adev->vcn.harvest_config & (1 << j))
-continue;
-for (k = 0; k < adev->vcn.num_enc_rings; ++k)
-rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
-}
+scheds = adev->vcn.vcn_enc_sched;
+num_scheds = adev->vcn.num_vcn_enc_sched;
 break;
 case AMDGPU_HW_IP_VCN_JPEG:
-for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
-if (adev->jpeg.harvest_config & (1 << j))
-continue;
-rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
-}
+scheds = adev->jpeg.jpeg_sched;
+num_scheds = adev->jpeg.num_jpeg_sched;
 break;
 }
 
-for (j = 0; j < num_rings; ++j) {
-if (!rings[j]->adev)
-continue;
-
-rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
-}
-
 for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
 r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-rqs, num_rqs, &ctx->guilty);
+priority, scheds,
+num_scheds, &ctx->guilty);
 if (r)
 goto error_cleanup_entities;
 }
@@ -627,3 +612,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 idr_destroy(&mgr->ctx_handles);
 mutex_destroy(&mgr->lock);
 }
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+int i, j;
+
+for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+adev->gfx.num_gfx_sched++;
+}
+
+for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+adev->gfx.num_compute_sched++;
+}
+
+for (i = 0; i < adev->sdma.num_instances; i++) {
+adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+adev->sdma.num_sdma_sched++;
+}
+
+for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+if (adev->vcn.harvest_config & (1 << i))
+continue;
+adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+&adev->vcn.inst[i].ring_dec.sched;
+}
+
+for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+if (adev->vcn.harvest_config & (1 << i))
+continue;
+for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+&adev->vcn.inst[i].ring_enc[j].sched;
+}
+
+for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+if (adev->jpeg.harvest_config & (1 << i))
+continue;
+adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+&adev->jpeg.inst[i].ring_dec.sched;
+}
+}
@@ -87,4 +87,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
 #endif
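The context-init changes above replace per-ring run-queue arrays with arrays of drm_gpu_scheduler pointers, gathered once at init time by amdgpu_ctx_init_sched() and then passed to drm_sched_entity_init() together with the priority. A minimal hypothetical fragment showing that calling convention; the wrapper name and its arguments are illustrative, only drm_sched_entity_init() and the scheduler arrays come from the patch:

	#include <drm/gpu_scheduler.h>

	/* Illustrative helper (not part of the patch): bind an entity to a set
	 * of schedulers at a fixed priority instead of to per-ring run-queues. */
	static int example_bind_entity(struct drm_sched_entity *entity,
				       struct drm_gpu_scheduler **scheds,
				       unsigned int num_scheds,
				       atomic_t *guilty)
	{
		return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					     scheds, num_scheds, guilty);
	}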
@@ -129,7 +129,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 sh_bank = 0xFFFFFFFF;
 if (instance_bank == 0x3FF)
 instance_bank = 0xFFFFFFFF;
-use_bank = 1;
+use_bank = true;
 } else if (*pos & (1ULL << 61)) {
 
 me = (*pos & GENMASK_ULL(33, 24)) >> 24;
@@ -137,9 +137,9 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
 
-use_ring = 1;
+use_ring = true;
 } else {
-use_bank = use_ring = 0;
+use_bank = use_ring = false;
 }
 
 *pos &= (1UL << 22) - 1;
@@ -66,6 +66,7 @@
 #include "amdgpu_pmu.h"
 
 #include <linux/suspend.h>
+#include <drm/task_barrier.h>
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -1031,8 +1032,6 @@ def_value:
 */
 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 {
-int ret = 0;
-
 if (amdgpu_sched_jobs < 4) {
 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 amdgpu_sched_jobs);
@@ -1072,7 +1071,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
-return ret;
+return 0;
 }
 
 /**
@@ -1810,6 +1809,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 }
 }
 
+if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 
 return r;
@@ -2439,7 +2439,8 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 AMD_IP_BLOCK_TYPE_GFX,
 AMD_IP_BLOCK_TYPE_SDMA,
 AMD_IP_BLOCK_TYPE_UVD,
-AMD_IP_BLOCK_TYPE_VCE
+AMD_IP_BLOCK_TYPE_VCE,
+AMD_IP_BLOCK_TYPE_VCN
 };
 
 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2454,7 +2455,11 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 block->status.hw)
 continue;
 
+if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
+r = block->version->funcs->resume(adev);
+else
 r = block->version->funcs->hw_init(adev);
+
 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
 if (r)
 return r;
@@ -2663,14 +2668,38 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 {
 struct amdgpu_device *adev =
 container_of(__work, struct amdgpu_device, xgmi_reset_work);
+struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
 
-if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
-adev->asic_reset_res = (adev->in_baco == false) ?
-amdgpu_device_baco_enter(adev->ddev) :
-amdgpu_device_baco_exit(adev->ddev);
-else
+/* It's a bug to not have a hive within this function */
+if (WARN_ON(!hive))
+return;
+
+/*
+ * Use task barrier to synchronize all xgmi reset works across the
+ * hive. task_barrier_enter and task_barrier_exit will block
+ * until all the threads running the xgmi reset works reach
+ * those points. task_barrier_full will do both blocks.
+ */
+if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+task_barrier_enter(&hive->tb);
+adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+
+if (adev->asic_reset_res)
+goto fail;
+
+task_barrier_exit(&hive->tb);
+adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+
+if (adev->asic_reset_res)
+goto fail;
+} else {
+
+task_barrier_full(&hive->tb);
 adev->asic_reset_res = amdgpu_asic_reset(adev);
+}
 
+fail:
 if (adev->asic_reset_res)
 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
 adev->asic_reset_res, adev->ddev->unique);
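A hedged sketch of the synchronization pattern introduced above, assuming a task_barrier that every node's reset worker has already joined: BACO resets rendezvous once before entering and once before exiting BACO, while other reset methods need only a single full rendezvous. Only task_barrier_enter/exit/full and the amdgpu calls in the hunk come from the patch; the wrapper itself is illustrative:

	#include <drm/task_barrier.h>

	static void example_xgmi_reset_step(struct task_barrier *tb, bool use_baco)
	{
		if (use_baco) {
			task_barrier_enter(tb);	/* all nodes enter BACO together */
			/* ... amdgpu_device_baco_enter() would run here ... */
			task_barrier_exit(tb);	/* all nodes leave BACO together */
			/* ... amdgpu_device_baco_exit() would run here ... */
		} else {
			task_barrier_full(tb);	/* one rendezvous, then reset */
			/* ... amdgpu_asic_reset() would run here ... */
		}
	}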
@@ -2785,7 +2814,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 adev->mman.buffer_funcs = NULL;
 adev->mman.buffer_funcs_ring = NULL;
 adev->vm_manager.vm_pte_funcs = NULL;
-adev->vm_manager.vm_pte_num_rqs = 0;
+adev->vm_manager.vm_pte_num_scheds = 0;
 adev->gmc.gmc_funcs = NULL;
 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -3029,6 +3058,14 @@ fence_driver_init:
 goto failed;
 }
 
+DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+adev->gfx.config.max_shader_engines,
+adev->gfx.config.max_sh_per_se,
+adev->gfx.config.max_cu_per_sh,
+adev->gfx.cu_info.number);
+
+amdgpu_ctx_init_sched(adev);
+
 adev->accel_working = true;
 
 amdgpu_vm_check_compute_bug(adev);
@@ -3660,8 +3697,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 if (r)
 return r;
 
-amdgpu_amdkfd_pre_reset(adev);
-
 /* Resume IP prior to SMC */
 r = amdgpu_device_ip_reinit_early_sriov(adev);
 if (r)
@@ -3790,18 +3825,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 return r;
 }
 
-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
-struct amdgpu_hive_info *hive,
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 struct list_head *device_list_handle,
 bool *need_full_reset_arg)
 {
 struct amdgpu_device *tmp_adev = NULL;
 bool need_full_reset = *need_full_reset_arg, vram_lost = false;
 int r = 0;
-int cpu = smp_processor_id();
-bool use_baco =
-(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
-true : false;
 
 /*
 * ASIC reset has to be done on all HGMI hive nodes ASAP
@@ -3809,24 +3839,21 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 */
 if (need_full_reset) {
 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-/*
-* For XGMI run all resets in parallel to speed up the
-* process by scheduling the highpri wq on different
-* cpus. For XGMI with baco reset, all nodes must enter
-* baco within close proximity before anyone exit.
-*/
+/* For XGMI run all resets in parallel to speed up the process */
 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-if (!queue_work_on(cpu, system_highpri_wq,
-&tmp_adev->xgmi_reset_work))
+if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
 r = -EALREADY;
-cpu = cpumask_next(cpu, cpu_online_mask);
 } else
 r = amdgpu_asic_reset(tmp_adev);
-if (r)
+
+if (r) {
+DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+r, tmp_adev->ddev->unique);
 break;
 }
+}
 
-/* For XGMI wait for all work to complete before proceed */
+/* For XGMI wait for all resets to complete before proceed */
 if (!r) {
 list_for_each_entry(tmp_adev, device_list_handle,
 gmc.xgmi.head) {
@@ -3835,53 +3862,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 r = tmp_adev->asic_reset_res;
 if (r)
 break;
-if (use_baco)
-tmp_adev->in_baco = true;
 }
 }
 }
 
-/*
-* For XGMI with baco reset, need exit baco phase by scheduling
-* xgmi_reset_work one more time. PSP reset and sGPU skips this
-* phase. Not assume the situation that PSP reset and baco reset
-* coexist within an XGMI hive.
-*/
-
-if (!r && use_baco) {
-cpu = smp_processor_id();
-list_for_each_entry(tmp_adev, device_list_handle,
-gmc.xgmi.head) {
-if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-if (!queue_work_on(cpu,
-system_highpri_wq,
-&tmp_adev->xgmi_reset_work))
-r = -EALREADY;
-if (r)
-break;
-cpu = cpumask_next(cpu, cpu_online_mask);
-}
-}
-}
-
-if (!r && use_baco) {
-list_for_each_entry(tmp_adev, device_list_handle,
-gmc.xgmi.head) {
-if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-flush_work(&tmp_adev->xgmi_reset_work);
-r = tmp_adev->asic_reset_res;
-if (r)
-break;
-tmp_adev->in_baco = false;
-}
-}
-}
-
-if (r) {
-DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
-r, tmp_adev->ddev->unique);
-goto end;
-}
 }
 
 if (!r && amdgpu_ras_intr_triggered())
@@ -3974,7 +3957,7 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 mutex_lock(&adev->lock_reset);
 
 atomic_inc(&adev->gpu_reset_counter);
-adev->in_gpu_reset = 1;
+adev->in_gpu_reset = true;
 switch (amdgpu_asic_reset_method(adev)) {
 case AMD_RESET_METHOD_MODE1:
 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -3994,7 +3977,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
 amdgpu_vf_error_trans_all(adev);
 adev->mp1_state = PP_MP1_STATE_NONE;
-adev->in_gpu_reset = 0;
+adev->in_gpu_reset = false;
 mutex_unlock(&adev->lock_reset);
 }
 
@@ -4175,8 +4158,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
 if (r)
 adev->asic_reset_res = r;
 } else {
-r = amdgpu_do_asic_reset(adev, hive, device_list_handle,
-&need_full_reset);
+r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
 if (r && r == -EAGAIN)
 goto retry;
 }
@@ -951,16 +951,31 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 case AMD_IP_BLOCK_TYPE_VCN:
 case AMD_IP_BLOCK_TYPE_VCE:
 case AMD_IP_BLOCK_TYPE_SDMA:
-if (swsmu)
+if (swsmu) {
 ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-else
+} else {
+if (adev->powerplay.pp_funcs &&
+adev->powerplay.pp_funcs->set_powergating_by_smu) {
+mutex_lock(&adev->pm.mutex);
 ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 (adev)->powerplay.pp_handle, block_type, gate));
+mutex_unlock(&adev->pm.mutex);
+}
+}
+break;
+case AMD_IP_BLOCK_TYPE_JPEG:
+if (swsmu)
+ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
 break;
 case AMD_IP_BLOCK_TYPE_GMC:
 case AMD_IP_BLOCK_TYPE_ACP:
+if (adev->powerplay.pp_funcs &&
+adev->powerplay.pp_funcs->set_powergating_by_smu) {
+mutex_lock(&adev->pm.mutex);
 ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 (adev)->powerplay.pp_handle, block_type, gate));
+mutex_unlock(&adev->pm.mutex);
+}
 break;
 default:
 break;
@@ -142,7 +142,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry = 1;
+int amdgpu_noretry;
 int amdgpu_force_asic_type = -1;
 
 struct amdgpu_mgpu_info mgpu_info = {
@@ -588,7 +588,7 @@ MODULE_PARM_DESC(mes,
 module_param_named(mes, amdgpu_mes, int, 0444);
 
 MODULE_PARM_DESC(noretry,
-"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
+"Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
 /**
@@ -1203,13 +1203,23 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 struct pci_dev *pdev = to_pci_dev(dev);
 struct drm_device *drm_dev = pci_get_drvdata(pdev);
 struct amdgpu_device *adev = drm_dev->dev_private;
-int ret;
+int ret, i;
 
 if (!adev->runpm) {
 pm_runtime_forbid(dev);
 return -EBUSY;
 }
 
+/* wait for all rings to drain before suspending */
+for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+struct amdgpu_ring *ring = adev->rings[i];
+if (ring && ring->sched.ready) {
+ret = amdgpu_fence_wait_empty(ring);
+if (ret)
+return -EBUSY;
+}
+}
+
 if (amdgpu_device_supports_boco(drm_dev))
 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 drm_kms_helper_poll_disable(drm_dev);
@@ -1381,7 +1391,8 @@ static struct drm_driver kms_driver = {
 .driver_features =
 DRIVER_USE_AGP | DRIVER_ATOMIC |
 DRIVER_GEM |
-DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+DRIVER_SYNCOBJ_TIMELINE,
 .load = amdgpu_driver_load_kms,
 .open = amdgpu_driver_open_kms,
 .postclose = amdgpu_driver_postclose_kms,
@@ -34,6 +34,7 @@
 #include <linux/kref.h>
 #include <linux/slab.h>
 #include <linux/firmware.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_debugfs.h>
 
@@ -154,7 +155,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 seq);
 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 seq, flags | AMDGPU_FENCE_FLAG_INT);
+pm_runtime_get_noresume(adev->ddev->dev);
 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
 if (unlikely(rcu_dereference_protected(*ptr, 1))) {
 struct dma_fence *old;
@@ -234,6 +235,7 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
 bool amdgpu_fence_process(struct amdgpu_ring *ring)
 {
 struct amdgpu_fence_driver *drv = &ring->fence_drv;
+struct amdgpu_device *adev = ring->adev;
 uint32_t seq, last_seq;
 int r;
 
@@ -274,6 +276,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
 BUG();
 
 dma_fence_put(fence);
+pm_runtime_mark_last_busy(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev->ddev->dev);
 } while (last_seq != seq);
 
 return true;
@@ -641,7 +641,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 if (adev->gfx.funcs->query_ras_error_count)
 adev->gfx.funcs->query_ras_error_count(adev, err_data);
-amdgpu_ras_reset_gpu(adev, 0);
+amdgpu_ras_reset_gpu(adev);
 }
 return AMDGPU_RAS_SUCCESS;
 }
@@ -269,8 +269,12 @@ struct amdgpu_gfx {
 bool me_fw_write_wait;
 bool cp_fw_write_wait;
 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
+uint32_t num_gfx_sched;
 unsigned num_gfx_rings;
 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+uint32_t num_compute_sched;
 unsigned num_compute_rings;
 struct amdgpu_irq_src eop_irq;
 struct amdgpu_irq_src priv_reg_irq;
@@ -223,7 +223,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 u64 size_af, size_bf;
 
 if (amdgpu_sriov_vf(adev)) {
-mc->agp_start = 0xffffffff;
+mc->agp_start = 0xffffffffffff;
 mc->agp_end = 0x0;
 mc->agp_size = 0;
 
@@ -333,3 +333,43 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 amdgpu_mmhub_ras_fini(adev);
 amdgpu_xgmi_ras_fini(adev);
 }
+
+/*
+ * The latest engine allocation on gfx9/10 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+struct amdgpu_ring *ring;
+unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
+GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+unsigned i;
+unsigned vmhub, inv_eng;
+
+for (i = 0; i < adev->num_rings; ++i) {
+ring = adev->rings[i];
+vmhub = ring->funcs->vmhub;
+
+inv_eng = ffs(vm_inv_engs[vmhub]);
+if (!inv_eng) {
+dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ring->name);
+return -EINVAL;
+}
+
+ring->vm_inv_eng = inv_eng - 1;
+vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
+}
+
+return 0;
+}
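To make the bitmap arithmetic in amdgpu_gmc_allocate_vm_inv_eng() above concrete: 0x1FFF3 sets bits 0, 1 and 4..16, exactly the engines the comment lists as free, and ffs() hands them out lowest-first while each grant clears its bit. A small user-space style illustration (not driver code, just the same arithmetic):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int free_engs = 0x1FFF3;
		int eng;

		/* Prints 0, 1, 4, 5, ..., 16 -- engines 2, 3 and 17 stay reserved. */
		while ((eng = ffs(free_engs)) != 0) {
			printf("allocated invalidation engine %d\n", eng - 1);
			free_engs &= ~(1u << (eng - 1));
		}
		return 0;
	}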
@@ -267,5 +267,6 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
 uint16_t pasid, uint64_t timestamp);
 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
 #endif
@@ -43,6 +43,8 @@ struct amdgpu_jpeg {
 uint8_t num_jpeg_inst;
 struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
 struct amdgpu_jpeg_reg internal;
+struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+uint32_t num_jpeg_sched;
 unsigned harvest_config;
 struct delayed_work idle_work;
 enum amd_powergating_state cur_state;
@@ -2762,17 +2762,12 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
 int ret = 0;
-if (is_support_sw_smu(adev)) {
-ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
 if (ret)
-DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
-enable ? "true" : "false", ret);
-} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
-/* enable/disable UVD */
-mutex_lock(&adev->pm.mutex);
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
-mutex_unlock(&adev->pm.mutex);
-}
+DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+enable ? "enable" : "disable", ret);
 /* enable/disable Low Memory PState for UVD (4k videos) */
 if (adev->asic_type == CHIP_STONEY &&
 adev->uvd.decode_image_width >= WIDTH_4K) {
@@ -2789,17 +2784,11 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
 int ret = 0;
-if (is_support_sw_smu(adev)) {
-ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
 if (ret)
-DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
-enable ? "true" : "false", ret);
-} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
-/* enable/disable VCE */
-mutex_lock(&adev->pm.mutex);
-amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
-mutex_unlock(&adev->pm.mutex);
-}
+DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+enable ? "enable" : "disable", ret);
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -2818,12 +2807,10 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
 {
 int ret = 0;
 
-if (is_support_sw_smu(adev)) {
-ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable);
+ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
 if (ret)
-DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n",
-enable ? "true" : "false", ret);
-}
+DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+enable ? "enable" : "disable", ret);
 }
 
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
@@ -107,7 +107,7 @@ static void amdgpu_perf_read(struct perf_event *event)
 default:
 count = 0;
 break;
-};
+}
 } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
 
 local64_add(count - prev, &event->count);
@@ -130,7 +130,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
 break;
 default:
 break;
-};
+}
 
 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 hwc->state |= PERF_HES_STOPPED;
@@ -160,7 +160,7 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
 break;
 default:
 return 0;
-};
+}
 
 if (retval)
 return retval;
@@ -188,7 +188,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
 break;
 default:
 break;
-};
+}
 
 perf_event_update_userpage(event);
 }
@@ -191,9 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
 if (ucode)
 DRM_WARN("failed to load ucode id (%d) ",
 ucode->ucode_id);
-DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
 psp->cmd_buf_mem->cmd_id,
-psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+psp->cmd_buf_mem->resp.status);
 if (!timeout) {
 mutex_unlock(&psp->mutex);
 return -EINVAL;
@@ -365,11 +365,11 @@ static int psp_asd_load(struct psp_context *psp)
 return ret;
 }
 
-static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-uint32_t asd_session_id)
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+uint32_t session_id)
 {
 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-cmd->cmd.cmd_unload_ta.session_id = asd_session_id;
+cmd->cmd.cmd_unload_ta.session_id = session_id;
 }
 
 static int psp_asd_unload(struct psp_context *psp)
@@ -387,7 +387,7 @@ static int psp_asd_unload(struct psp_context *psp)
 if (!cmd)
 return -ENOMEM;
 
-psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id);
+psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
 
 ret = psp_cmd_submit_buf(psp, NULL, cmd,
 psp->fence_buf_mc_addr);
@@ -427,18 +427,20 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 return ret;
 }
 
-static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
-uint32_t xgmi_ta_size, uint32_t shared_size)
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+uint64_t ta_bin_mc,
+uint32_t ta_bin_size,
+uint64_t ta_shared_mc,
+uint32_t ta_shared_size)
 {
 cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
-cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
-cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
+cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
 
-cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
-cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
-cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
+cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
+cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
 }
 
 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
@@ -458,6 +460,36 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
 return ret;
 }
 
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+uint32_t ta_cmd_id,
+uint32_t session_id)
+{
+cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+cmd->cmd.cmd_invoke_cmd.session_id = session_id;
+cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+uint32_t ta_cmd_id,
+uint32_t session_id)
+{
+int ret;
+struct psp_gfx_cmd_resp *cmd;
+
+cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+if (!cmd)
+return -ENOMEM;
+
+psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+
+ret = psp_cmd_submit_buf(psp, NULL, cmd,
+psp->fence_buf_mc_addr);
+
+kfree(cmd);
+
+return ret;
+}
+
 static int psp_xgmi_load(struct psp_context *psp)
 {
 int ret;
@@ -466,8 +498,6 @@ static int psp_xgmi_load(struct psp_context *psp)
 /*
 * TODO: bypass the loading in sriov for now
 */
-if (amdgpu_sriov_vf(psp->adev))
-return 0;
 
 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 if (!cmd)
@@ -476,9 +506,11 @@ static int psp_xgmi_load(struct psp_context *psp)
 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
 
-psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+psp_prep_ta_load_cmd_buf(cmd,
+psp->fw_pri_mc_addr,
+psp->ta_xgmi_ucode_size,
 psp->xgmi_context.xgmi_shared_mc_addr,
-psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+PSP_XGMI_SHARED_MEM_SIZE);
 
 ret = psp_cmd_submit_buf(psp, NULL, cmd,
 psp->fence_buf_mc_addr);
@@ -493,13 +525,6 @@ static int psp_xgmi_load(struct psp_context *psp)
 return ret;
 }
 
-static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-uint32_t xgmi_session_id)
-{
-cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
-}
-
 static int psp_xgmi_unload(struct psp_context *psp)
 {
 int ret;
@@ -508,14 +533,12 @@ static int psp_xgmi_unload(struct psp_context *psp)
 /*
 * TODO: bypass the unloading in sriov for now
 */
-if (amdgpu_sriov_vf(psp->adev))
-return 0;
 
 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 if (!cmd)
 return -ENOMEM;
 
-psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
 
 ret = psp_cmd_submit_buf(psp, NULL, cmd,
 psp->fence_buf_mc_addr);
@@ -525,40 +548,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t ta_cmd_id,
-					    uint32_t xgmi_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
-	/*
-	 * TODO: bypass the loading in sriov for now
-	 */
-	if (amdgpu_sriov_vf(psp->adev))
-		return 0;
-
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-					psp->xgmi_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-				 psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
 }
 
 static int psp_xgmi_terminate(struct psp_context *psp)
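The hunks above and below repeat a single pattern: the per-TA command builders (psp_prep_xgmi_ta_*, psp_prep_ras_ta_*, psp_prep_hdcp_ta_*, psp_prep_dtm_ta_*) are replaced by shared psp_prep_ta_load_cmd_buf()/psp_prep_ta_unload_cmd_buf() helpers, and each *_invoke() wrapper collapses into a call to a common psp_ta_invoke(). The body of psp_ta_invoke() is not part of this excerpt; as a hedged sketch, it is assumed to mirror the per-TA code it replaces:

/* Illustrative sketch only -- psp_ta_invoke() itself is defined elsewhere in
 * the driver and is assumed to follow the pattern of the deleted wrappers. */
static int psp_ta_invoke_sketch(struct psp_context *psp,
				uint32_t ta_cmd_id, uint32_t session_id)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* same INVOKE command the removed per-TA helpers built by hand */
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	kfree(cmd);

	return ret;
}

Passing the session id as a parameter is what lets XGMI, RAS, HDCP and DTM share one implementation.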
@@ -614,20 +606,6 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 }
 
 // ras begin
-static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-		uint64_t ras_ta_mc, uint64_t ras_mc_shared,
-		uint32_t ras_ta_size, uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -663,15 +641,17 @@ static int psp_ras_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
 
-	psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-			psp->ras.ras_shared_mc_addr,
-			psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_ras_ucode_size,
+				 psp->ras.ras_shared_mc_addr,
+				 PSP_RAS_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 			psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->ras.ras_initialized = 1;
+		psp->ras.ras_initialized = true;
 		psp->ras.session_id = cmd->resp.session_id;
 	}
 
@@ -680,13 +660,6 @@ static int psp_ras_load(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-						uint32_t ras_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
-}
-
 static int psp_ras_unload(struct psp_context *psp)
 {
 	int ret;
@@ -702,7 +675,7 @@ static int psp_ras_unload(struct psp_context *psp)
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
 			psp->fence_buf_mc_addr);
@@ -712,40 +685,15 @@ static int psp_ras_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-						uint32_t ta_cmd_id,
-						uint32_t ras_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-			psp->ras.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd,
-			psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
 }
 
 int psp_ras_enable_features(struct psp_context *psp,
@@ -791,7 +739,7 @@ static int psp_ras_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->ras.ras_initialized = 0;
+	psp->ras.ras_initialized = false;
 
 	/* free ras shared memory */
 	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
@@ -832,24 +780,6 @@ static int psp_ras_initialize(struct psp_context *psp)
 // ras end
 
 // HDCP start
-static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					  uint64_t hdcp_ta_mc,
-					  uint64_t hdcp_mc_shared,
-					  uint32_t hdcp_ta_size,
-					  uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
-		lower_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
-		upper_32_bits(hdcp_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -886,15 +816,16 @@ static int psp_hdcp_load(struct psp_context *psp)
 	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
 	       psp->ta_hdcp_ucode_size);
 
-	psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				      psp->hdcp_context.hdcp_shared_mc_addr,
-				      psp->ta_hdcp_ucode_size,
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_hdcp_ucode_size,
+				 psp->hdcp_context.hdcp_shared_mc_addr,
 				 PSP_HDCP_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->hdcp_context.hdcp_initialized = 1;
+		psp->hdcp_context.hdcp_initialized = true;
 		psp->hdcp_context.session_id = cmd->resp.session_id;
 	}
 
@@ -930,12 +861,6 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 
 	return 0;
 }
-static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
-	cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
-}
 
 static int psp_hdcp_unload(struct psp_context *psp)
 {
@@ -952,7 +877,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
 	if (!cmd)
 		return -ENOMEM;
 
-	psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -961,39 +886,15 @@ static int psp_hdcp_unload(struct psp_context *psp)
 	return ret;
 }
 
-static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					    uint32_t ta_cmd_id,
-					    uint32_t hdcp_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-					psp->hdcp_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
 }
 
 static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1013,7 +914,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->hdcp_context.hdcp_initialized = 0;
+	psp->hdcp_context.hdcp_initialized = false;
 
 	/* free hdcp shared memory */
 	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
@@ -1025,22 +926,6 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 // HDCP end
 
 // DTM start
-static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					 uint64_t dtm_ta_mc,
-					 uint64_t dtm_mc_shared,
-					 uint32_t dtm_ta_size,
-					 uint32_t shared_size)
-{
-	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
-	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
-	cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
-
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
-	cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
-}
-
 static int psp_dtm_init_shared_buf(struct psp_context *psp)
 {
 	int ret;
@@ -1076,15 +961,16 @@ static int psp_dtm_load(struct psp_context *psp)
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
 
-	psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-				     psp->dtm_context.dtm_shared_mc_addr,
-				     psp->ta_dtm_ucode_size,
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_dtm_ucode_size,
+				 psp->dtm_context.dtm_shared_mc_addr,
 				 PSP_DTM_SHARED_MEM_SIZE);
 
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
 	if (!ret) {
-		psp->dtm_context.dtm_initialized = 1;
+		psp->dtm_context.dtm_initialized = true;
 		psp->dtm_context.session_id = cmd->resp.session_id;
 	}
 
@@ -1122,39 +1008,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
 	return 0;
 }
 
-static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
-					   uint32_t ta_cmd_id,
-					   uint32_t dtm_session_id)
-{
-	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
-	cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
-	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
-	/* Note: cmd_invoke_cmd.buf is not used for now */
-}
-
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-	int ret;
-	struct psp_gfx_cmd_resp *cmd;
-
 	/*
 	 * TODO: bypass the loading in sriov for now
 	 */
 	if (amdgpu_sriov_vf(psp->adev))
 		return 0;
 
-	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
-				       psp->dtm_context.session_id);
-
-	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
-	kfree(cmd);
-
-	return ret;
+	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
 }
 
 static int psp_dtm_terminate(struct psp_context *psp)
@@ -1174,7 +1036,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
 	if (ret)
 		return ret;
 
-	psp->dtm_context.dtm_initialized = 0;
+	psp->dtm_context.dtm_initialized = false;
 
 	/* free hdcp shared memory */
 	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
@@ -1310,6 +1172,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
 	case AMDGPU_UCODE_ID_VCN:
 		*type = GFX_FW_TYPE_VCN;
 		break;
+	case AMDGPU_UCODE_ID_VCN1:
+		*type = GFX_FW_TYPE_VCN1;
+		break;
 	case AMDGPU_UCODE_ID_DMCU_ERAM:
 		*type = GFX_FW_TYPE_DMCU_ERAM;
 		break;
@@ -1454,7 +1319,8 @@ out:
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
 		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
-		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+		    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
 			/*skip ucode loading in SRIOV VF */
 			continue;
 
@@ -1472,7 +1338,7 @@ out:
 
 		/* Start rlc autoload after psp recieved all the gfx firmware */
 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
-		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
 			ret = psp_rlc_autoload(psp);
 			if (ret) {
 				DRM_ERROR("Failed to start rlc autoload\n");
@@ -1503,8 +1369,6 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	if (!psp->cmd)
 		return -ENOMEM;
 
-	/* this fw pri bo is not used under SRIOV */
-	if (!amdgpu_sriov_vf(psp->adev)) {
 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
 				      AMDGPU_GEM_DOMAIN_GTT,
 				      &psp->fw_pri_bo,
@@ -1512,7 +1376,6 @@ static int psp_load_fw(struct amdgpu_device *adev)
 				      &psp->fw_pri_buf);
 	if (ret)
 		goto failed;
-	}
 
 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
@@ -202,7 +202,6 @@ struct psp_memory_training_context {
 
 	/*vram offset of the p2c training data*/
 	u64 p2c_train_data_offset;
-	struct amdgpu_bo *p2c_bo;
 
 	/*vram offset of the c2p training data*/
 	u64 c2p_train_data_offset;
@@ -315,7 +315,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
 	default:
 		ret = -EINVAL;
 		break;
-	};
+	}
 
 	if (ret)
 		return -EINVAL;
@@ -1311,6 +1311,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 	data = con->eh_data;
 	if (!data || data->count == 0) {
 		*bps = NULL;
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1870,7 +1871,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
 		 * See feature_enable_on_boot
 		 */
 		amdgpu_ras_disable_all_features(adev, 1);
-		amdgpu_ras_reset_gpu(adev, 0);
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
 
@@ -1933,6 +1934,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
 		DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
 
-		amdgpu_ras_reset_gpu(adev, false);
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
@@ -494,8 +494,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 
 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
 
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
-		bool is_baco)
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
@@ -160,7 +160,7 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
 		struct amdgpu_iv_entry *entry)
 {
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-	amdgpu_ras_reset_gpu(adev, 0);
+	amdgpu_ras_reset_gpu(adev);
 
 	return AMDGPU_RAS_SUCCESS;
 }
@@ -52,6 +52,8 @@ struct amdgpu_sdma_instance {
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct drm_gpu_scheduler    *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+	uint32_t		    num_sdma_sched;
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	struct amdgpu_irq_src	ecc_irq;
@@ -1714,12 +1714,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
 	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
 	ctx->c2p_bo = NULL;
 
-	amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
-	ctx->p2c_bo = NULL;
-
 	return 0;
 }
 
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) )
+		vram_size -= SZ_1M;
+
+	return ALIGN(vram_size, SZ_1M);
+}
+
 /**
  * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
  *
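The new amdgpu_ttm_training_get_c2p_offset() helper above derives the c2p buffer location from the VRAM size instead of reading adev->fw_vram_usage.mem_train_fb_loc (see the next hunk). A small stand-alone check of the rounding, with SZ_1M, SZ_4K and ALIGN reproduced from their usual kernel definitions so the sketch compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ULL
#define SZ_1M 0x100000ULL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Same arithmetic as amdgpu_ttm_training_get_c2p_offset() in the hunk above. */
static uint64_t c2p_offset(uint64_t vram_size)
{
	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
		vram_size -= SZ_1M;

	return ALIGN(vram_size, SZ_1M);
}

int main(void)
{
	/* 8 GiB of VRAM: the low 20 bits are zero, so the offset backs off a
	 * full MiB and lands on the last MiB boundary below the top of VRAM. */
	printf("%#llx\n", (unsigned long long)c2p_offset(8ULL << 30));	/* 0x1fff00000 */
	return 0;
}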
@@ -1738,7 +1743,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+	ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
 	ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
 	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
 
@@ -1747,17 +1752,6 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 		  ctx->p2c_train_data_offset,
 		  ctx->c2p_train_data_offset);
 
-	ret = amdgpu_bo_create_kernel_at(adev,
-					 ctx->p2c_train_data_offset,
-					 ctx->train_data_size,
-					 AMDGPU_GEM_DOMAIN_VRAM,
-					 &ctx->p2c_bo,
-					 NULL);
-	if (ret) {
-		DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
-		goto Err_out;
-	}
-
 	ret = amdgpu_bo_create_kernel_at(adev,
 					 ctx->c2p_train_data_offset,
 					 ctx->train_data_size,
@@ -1766,15 +1760,12 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 					 NULL);
 	if (ret) {
 		DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
-		goto Err_out;
+		amdgpu_ttm_training_reserve_vram_fini(adev);
+		return ret;
 	}
 
 	ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
 	return 0;
-
-Err_out:
-	amdgpu_ttm_training_reserve_vram_fini(adev);
-	return ret;
 }
 
 /**
@@ -1987,11 +1978,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 	if (enable) {
 		struct amdgpu_ring *ring;
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		ring = adev->mman.buffer_funcs_ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		sched = &ring->sched;
+		r = drm_sched_entity_init(&adev->mman.entity,
+					  DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
@@ -300,10 +300,10 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC2_JT,
 	AMDGPU_UCODE_ID_CP_MES,
 	AMDGPU_UCODE_ID_CP_MES_DATA,
-	AMDGPU_UCODE_ID_RLC_G,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+	AMDGPU_UCODE_ID_RLC_G,
 	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_SMC,
 	AMDGPU_UCODE_ID_UVD,
@@ -95,13 +95,6 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 {
 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
-	/* When “Full RAS” is enabled, the per-IP interrupt sources should
-	 * be disabled and the driver should only look for the aggregated
-	 * interrupt via sync flood
-	 */
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-		return AMDGPU_RAS_SUCCESS;
-
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 	if (adev->umc.funcs &&
 	    adev->umc.funcs->query_ras_error_count)
@@ -113,6 +106,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 			err_data->err_addr =
 				kcalloc(adev->umc.max_ras_err_cnt_per_query,
 					sizeof(struct eeprom_table_record), GFP_KERNEL);
+
 			/* still call query_ras_error_address to clear error status
 			 * even NOMEM error is encountered
 			 */
@@ -132,7 +126,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 						err_data->err_addr_cnt))
 				DRM_WARN("Failed to add ras bad page!\n");
 
-			amdgpu_ras_reset_gpu(adev, 0);
+			amdgpu_ras_reset_gpu(adev);
 		}
 
 		kfree(err_data->err_addr);
@@ -21,38 +21,6 @@
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
 
-/* implement 64 bits REG operations via 32 bits interface */
-#define RREG64_UMC(reg)	(RREG32(reg) | \
-				((uint64_t)RREG32((reg) + 1) << 32))
-#define WREG64_UMC(reg, v)	\
-	do {	\
-		WREG32((reg), lower_32_bits(v));	\
-		WREG32((reg) + 1, upper_32_bits(v));	\
-	} while (0)
-
-/*
- * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data,
- *		uint32_t umc_reg_offset, uint32_t channel_index)
- */
-#define amdgpu_umc_for_each_channel(func) \
-	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \
-	uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \
-	for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \
-		/* enable the index mode to query eror count per channel */ \
-		adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \
-		for (channel_inst = 0; \
-			channel_inst < adev->umc.channel_inst_num; \
-			channel_inst++) { \
-			/* calc the register offset according to channel instance */ \
-			umc_reg_offset = adev->umc.channel_offs * channel_inst; \
-			/* get channel index of interleaved memory */ \
-			channel_index = adev->umc.channel_idx_tbl[ \
-				umc_inst * adev->umc.channel_inst_num + channel_inst]; \
-			(func)(adev, err_data, umc_reg_offset, channel_index); \
-		} \
-	} \
-	adev->umc.funcs->disable_umc_index_mode(adev);
-
 struct amdgpu_umc_funcs {
 	void (*err_cnt_init)(struct amdgpu_device *adev);
 	int (*ras_late_init)(struct amdgpu_device *adev);
@@ -60,9 +28,6 @@ struct amdgpu_umc_funcs {
 				      void *ras_error_status);
 	void (*query_ras_error_address)(struct amdgpu_device *adev,
 					void *ras_error_status);
-	void (*enable_umc_index_mode)(struct amdgpu_device *adev,
-					uint32_t umc_instance);
-	void (*disable_umc_index_mode)(struct amdgpu_device *adev);
 	void (*init_registers)(struct amdgpu_device *adev);
 };
 
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->uvd.inst[0].ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD kernel entity.\n");
 		return r;
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->vce.ring[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
@@ -28,19 +28,10 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
-#include <drm/drm.h>
-
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_vcn.h"
 #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
-
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
 
 /* Firmware Names */
 #define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
@@ -294,6 +285,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
+
 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 		}
@@ -306,24 +298,15 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 			else
 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-			if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec))
-				new_state.jpeg = VCN_DPG_STATE__PAUSE;
-			else
-				new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
 			adev->vcn.pause_dpg_mode(adev, &new_state);
 		}
 
-		fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec);
 		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
 		fences += fence[j];
 	}
 
 	if (fences == 0) {
 		amdgpu_gfx_off_ctrl(adev, true);
-		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_uvd(adev, false);
-		else
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		       AMD_PG_STATE_GATE);
 	} else {
@@ -338,9 +321,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 	if (set_clocks) {
 		amdgpu_gfx_off_ctrl(adev, false);
-		if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-			amdgpu_dpm_enable_uvd(adev, true);
-		else
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		       AMD_PG_STATE_UNGATE);
 	}
@@ -358,15 +338,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 		else
 			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-		if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec))
-			new_state.jpeg = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
-		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
-			new_state.jpeg = VCN_DPG_STATE__PAUSE;
 
 		adev->vcn.pause_dpg_mode(adev, &new_state);
 	}
@@ -518,9 +491,14 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence;
 	long r;
 
+	/* temporarily disable ib test for sriov */
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
 	if (r)
 		goto error;
@@ -676,10 +654,15 @@ err:
 
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL;
 	struct amdgpu_bo *bo = NULL;
 	long r;
 
+	/* temporarily disable ib test for sriov */
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &bo, NULL, NULL);
@@ -31,6 +31,7 @@
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
 #define AMDGPU_MAX_VCN_INSTANCES	2
+#define AMDGPU_MAX_VCN_ENC_RINGS  AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
 
 #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
 #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -56,6 +57,9 @@
 #define VCN_VID_IP_ADDRESS_2_0		0x0
 #define VCN_AON_IP_ADDRESS_2_0		0x30000
 
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
+
 #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \
 	({	WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \
 		WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
@@ -188,6 +192,10 @@ struct amdgpu_vcn {
 	uint8_t	num_vcn_inst;
 	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
 	struct amdgpu_vcn_reg	internal;
+	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+	uint32_t		 num_vcn_enc_sched;
+	uint32_t		 num_vcn_dec_sched;
 
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,
@@ -2753,14 +2753,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
+
 	/* create scheduler entities for page table updates */
-	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		return r;
 
-	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		goto error_free_direct;
 
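These amdgpu_vm.c changes follow the drm scheduler rework that runs through this series: drm_sched_entity_init() now takes an explicit priority plus an array of drm_gpu_scheduler pointers instead of pre-selected run-queues. A minimal caller-side sketch of the new convention, where ring and entity stand in for whatever objects a caller owns:

	struct drm_gpu_scheduler *sched = &ring->sched;
	int r;

	/* one scheduler, normal priority, no guilty pointer */
	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r)
		return r;

The same substitution appears in the amdgpu_ttm.c, amdgpu_uvd.c, amdgpu_vce.c and cik_sdma.c hunks elsewhere in this patch.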
@@ -327,8 +327,8 @@ struct amdgpu_vm_manager {
 	u64					vram_base_offset;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rqs;
+	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_scheds;
 	struct amdgpu_ring			*page_fault;
 
 	/* partial resident texture handling */
@@ -261,6 +261,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
 	INIT_LIST_HEAD(&tmp->device_list);
 	mutex_init(&tmp->hive_lock);
 	mutex_init(&tmp->reset_lock);
+	task_barrier_init(&tmp->tb);
 
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
@@ -408,6 +409,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	top_info->num_nodes = count;
 	hive->number_devices = count;
 
+	task_barrier_add_task(&hive->tb);
+
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			/* update node list for other device in the hive */
@@ -470,6 +473,7 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		mutex_destroy(&hive->hive_lock);
 		mutex_destroy(&hive->reset_lock);
 	} else {
+		task_barrier_rem_task(&hive->tb);
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
@@ -22,6 +22,7 @@
 #ifndef __AMDGPU_XGMI_H__
 #define __AMDGPU_XGMI_H__
 
+#include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
 
 struct amdgpu_hive_info {
@@ -33,6 +34,7 @@ struct amdgpu_hive_info {
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
 	int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+	struct task_barrier tb;
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
@@ -228,7 +228,7 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 	u32 extra_bits = vmid & 0xf;
 
 	/* IB packet must end on a 8 DW boundary */
-	cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -811,7 +811,7 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =
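Both cik_sdma hunks above replace a subtract-and-modulo pad computation with a plain two's-complement mask; for any unsigned length the results agree. A tiny self-contained check (not from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (uint32_t len = 0; len < 64; len++) {
		uint32_t old_pad = (8 - (len & 0x7)) % 8;	/* previous expression */
		uint32_t new_pad = (-len) & 7;			/* new expression */

		assert(old_pad == new_pad);
	}
	return 0;
}

The emit_ib variant, (4 - wptr) & 7 versus (12 - (wptr & 7)) % 8, is the same identity: 12 and 4 are congruent modulo 8, so both pick the pad that lands the IB on an 8-dword boundary.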
@@ -1372,16 +1372,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
@@ -183,6 +183,61 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+/* same as perfmon_wreg but return status on write value check */
+static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
+					    uint32_t lo_addr, uint32_t lo_val,
+					    uint32_t hi_addr, uint32_t hi_val)
+{
+	unsigned long flags, address, data;
+	uint32_t lo_val_rb, hi_val_rb;
+
+	address = adev->nbio.funcs->get_pcie_index_offset(adev);
+	data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(address, lo_addr);
+	WREG32(data, lo_val);
+	WREG32(address, hi_addr);
+	WREG32(data, hi_val);
+
+	WREG32(address, lo_addr);
+	lo_val_rb = RREG32(data);
+	WREG32(address, hi_addr);
+	hi_val_rb = RREG32(data);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
+		return -EBUSY;
+
+	return 0;
+}
+
+/*
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
+ */
+#define ARM_RETRY_USEC_TIMEOUT	1000
+#define ARM_RETRY_USEC_INTERVAL	100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+					   uint32_t lo_addr, uint32_t lo_val,
+					   uint32_t hi_addr, uint32_t hi_val)
+{
+	int countdown = ARM_RETRY_USEC_TIMEOUT;
+
+	while (countdown) {
+
+		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
+						     hi_addr, hi_val))
+			break;
+
+		countdown -= ARM_RETRY_USEC_INTERVAL;
+		udelay(ARM_RETRY_USEC_INTERVAL);
+	}
+
+	return countdown > 0 ? 0 : -ETIME;
+}
+
 /* get the number of df counters available */
 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 			struct device_attribute *attr,
@@ -334,20 +389,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
 	switch (target_cntr) {
 
 	case 0:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
 		break;
 	case 1:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
 		break;
 	case 2:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
 		break;
 	case 3:
-		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
 		break;
 
 	}
@@ -422,6 +477,44 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 	return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK	(1 << 31)
+static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
+				    uint64_t config, bool is_deferred)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	if (target_cntr < 0)
+		return -EINVAL;
+
+	if (is_deferred)
+		adev->df_perfmon_config_assign_mask[target_cntr] |=
+							DEFERRED_ARM_MASK;
+	else
+		adev->df_perfmon_config_assign_mask[target_cntr] &=
+							~DEFERRED_ARM_MASK;
+
+	return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+				    uint64_t config)
+{
+	int target_cntr;
+
+	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+	/*
+	 * we never get target_cntr < 0 since this funciton is only called in
+	 * pmc_count for now but we should check anyways.
+	 */
+	return (target_cntr >= 0 &&
+			(adev->df_perfmon_config_assign_mask[target_cntr]
+			& DEFERRED_ARM_MASK));
+
+}
+
 /* release performance counter */
 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
 				     uint64_t config)
@@ -451,16 +544,15 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 			     int is_enable)
 {
 	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
-	int ret = 0;
+	int err = 0, ret = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
+		if (is_enable)
+			return df_v3_6_pmc_add_cntr(adev, config);
+
 		df_v3_6_reset_perfmon_cntr(adev, config);
 
-		if (is_enable) {
-			ret = df_v3_6_pmc_add_cntr(adev, config);
-		} else {
 		ret = df_v3_6_pmc_get_ctrl_settings(adev,
 					config,
 					&lo_base_addr,
@@ -471,9 +563,14 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
-				hi_base_addr, hi_val);
-	}
+		err = df_v3_6_perfmon_arm_with_retry(adev,
+						     lo_base_addr,
+						     lo_val,
+						     hi_base_addr,
+						     hi_val);
+
+		if (err)
+			ret = df_v3_6_pmc_set_deferred(adev, config, true);
 
 		break;
 	default:
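The start path above now arms the counter registers through df_v3_6_perfmon_arm_with_retry() and, when arming still fails, marks the counter deferred so that df_v3_6_pmc_get_count() (next hunks) can re-arm it before reading. The retry budget is 1000 us in 100 us steps, i.e. at most ten attempts; a stand-alone illustration of that bound (not kernel code):

#include <stdio.h>

#define ARM_RETRY_USEC_TIMEOUT	1000
#define ARM_RETRY_USEC_INTERVAL	100

int main(void)
{
	int countdown = ARM_RETRY_USEC_TIMEOUT;
	int tries = 0;

	while (countdown > 0) {
		tries++;			/* one arming attempt */
		countdown -= ARM_RETRY_USEC_INTERVAL;
	}
	printf("max attempts: %d\n", tries);	/* prints 10 */
	return 0;
}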
@@ -501,7 +598,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
 		if (ret)
 			return ret;
 
-		df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
+		df_v3_6_reset_perfmon_cntr(adev, config);
 
 		if (is_disable)
 			df_v3_6_pmc_release_cntr(adev, config);
@@ -518,18 +615,29 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
+	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA20:
-
 		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
 				      &hi_base_addr);
 
 		if ((lo_base_addr == 0) || (hi_base_addr == 0))
 			return;
 
+		/* rearm the counter or throw away count value on failure */
+		if (df_v3_6_pmc_is_deferred(adev, config)) {
+			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
+							lo_base_addr, lo_val,
+							hi_base_addr, hi_val);
+
+			if (rearm_err)
+				return;
+
+			df_v3_6_pmc_set_deferred(adev, config, false);
+		}
+
 		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
 				hi_base_addr, &hi_val);
 
@@ -542,7 +650,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 			config, lo_base_addr, hi_base_addr, lo_val, hi_val);
 
 		break;
-
 	default:
 		break;
 	}
@@ -471,18 +471,10 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
 		else
 			udelay(1);
 	}
-	if (i < adev->usec_timeout) {
-		if (amdgpu_emu_mode == 1)
-			DRM_INFO("ring test on %d succeeded in %d msecs\n",
-				 ring->idx, i);
-		else
-			DRM_INFO("ring test on %d succeeded in %d usecs\n",
-				 ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-			  ring->idx, scratch, tmp);
-		r = -EINVAL;
-	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	amdgpu_gfx_scratch_free(adev, scratch);
 
 	return r;
@@ -532,14 +524,10 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	}
 
 	tmp = RREG32(scratch);
-	if (tmp == 0xDEADBEEF) {
-		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+	if (tmp == 0xDEADBEEF)
 		r = 0;
-	} else {
-		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
+	else
 		r = -EINVAL;
-	}
 err2:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -588,8 +576,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.cp_fw_write_wait == false)
-		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
-			      GRBM requires 1-cycle delay in cp firmware\n");
+		DRM_WARN_ONCE("CP firmware version too old, please update!");
 }
 
 
@@ -1963,7 +1950,7 @@ static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
 		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
 
 		rlc_toc++;
-	};
+	}
 
 	return 0;
 }
@ -3606,23 +3593,16 @@ static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
|
||||||
|
|
||||||
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
|
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
|
||||||
ring = &adev->gfx.gfx_ring[i];
|
ring = &adev->gfx.gfx_ring[i];
|
||||||
DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
|
r = amdgpu_ring_test_helper(ring);
|
||||||
i, ring->me, ring->pipe, ring->queue);
|
if (r)
|
||||||
r = amdgpu_ring_test_ring(ring);
|
|
||||||
if (r) {
|
|
||||||
ring->sched.ready = false;
|
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
||||||
ring = &adev->gfx.compute_ring[i];
|
ring = &adev->gfx.compute_ring[i];
|
||||||
ring->sched.ready = true;
|
r = amdgpu_ring_test_helper(ring);
|
||||||
DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
|
|
||||||
i, ring->me, ring->pipe, ring->queue);
|
|
||||||
r = amdgpu_ring_test_ring(ring);
|
|
||||||
if (r)
|
if (r)
|
||||||
ring->sched.ready = false;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
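The cp_resume hunk above replaces the open-coded per-ring test, the DRM_INFO chatter and the manual sched.ready bookkeeping with amdgpu_ring_test_helper(). A rough sketch of what such a helper reduces to is below; the actual implementation lives elsewhere in amdgpu and may differ in details such as the log message.

/* sketch only: the common helper runs the ring test and records the
 * result in the scheduler state, so every caller stops duplicating it */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	ring->sched.ready = !r;   /* scheduler readiness follows the test */

	return r;
}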
@@ -48,8 +48,15 @@
 
 #include "amdgpu_ras.h"
 
-#include "sdma0/sdma0_4_0_offset.h"
-#include "sdma1/sdma1_4_0_offset.h"
+#include "sdma0/sdma0_4_2_offset.h"
+#include "sdma1/sdma1_4_2_offset.h"
+#include "sdma2/sdma2_4_2_2_offset.h"
+#include "sdma3/sdma3_4_2_2_offset.h"
+#include "sdma4/sdma4_4_2_2_offset.h"
+#include "sdma5/sdma5_4_2_2_offset.h"
+#include "sdma6/sdma6_4_2_2_offset.h"
+#include "sdma7/sdma7_4_2_2_offset.h"
 
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 4096
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

@@ -981,8 +988,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	    (adev->gfx.mec_feature_version < 46) ||
 	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
 	    (adev->gfx.pfp_feature_version < 46))
-		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
-			      GRBM requires 1-cycle delay in cp firmware\n");
+		DRM_WARN_ONCE("CP firmware version too old, please update!");
 
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:

@@ -1042,17 +1048,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		/* Disable GFXOFF on original raven.  There are combinations
-		 * of sbios and platforms that are not stable.
-		 */
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
-			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-		else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-			 &&((adev->gfx.rlc_fw_version != 106 &&
-			     adev->gfx.rlc_fw_version < 531) ||
-			    (adev->gfx.rlc_fw_version == 53815) ||
-			    (adev->gfx.rlc_feature_version < 1) ||
-			    !adev->gfx.rlc.is_rlc_v2_1))
+		if (!(adev->rev_id >= 0x8 ||
+		      adev->pdev->device == 0x15d8) &&
+		    (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
+		     !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
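The Raven branch above now gates GFXOFF on the SMU firmware version and on the presence of RLC v2.1 save/restore ucode, not only on the silicon revision. The same condition, pulled out as a self-contained predicate with plain parameters; the 0x41e2b cut-off and the 0x15d8 Picasso device id come from the hunk, everything else here is illustrative (in the driver, keeping GFXOFF also requires PP_GFXOFF_MASK to have been set in the first place).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Keep GFXOFF when the part is Raven2/Picasso, or when an original Raven1
 * carries new enough SMU firmware *and* RLC v2.1 save/restore ucode. */
static bool raven_keep_gfxoff(uint32_t rev_id, uint16_t pci_device,
			      uint32_t smu_fw_version, bool has_rlc_v2_1)
{
	bool raven2_or_picasso = (rev_id >= 0x8) || (pci_device == 0x15d8);

	if (raven2_or_picasso)
		return true;

	/* original Raven1: needs the "fresh" firmware stack */
	return (smu_fw_version >= 0x41e2b) && has_rlc_v2_1;
}

int main(void)
{
	printf("%d\n", raven_keep_gfxoff(0x1, 0x15dd, 0x41e2b, true));  /* 1 */
	printf("%d\n", raven_keep_gfxoff(0x1, 0x15dd, 0x41e2a, true));  /* 0 */
	printf("%d\n", raven_keep_gfxoff(0x8, 0x15dd, 0, false));       /* 1 */
	return 0;
}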
@@ -3933,43 +3932,58 @@ static const u32 sgpr_init_compute_shader[] =
 	0xbe800080, 0xbf810000,
 };
 
+/* When below register arrays changed, please update gpr_reg_size,
+  and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
+  to cover all gfx9 ASICs */
 static const struct soc15_reg_entry vgpr_init_regs[] = {
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
 static const struct soc15_reg_entry sgpr1_init_regs[] = {
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
+};
+
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
-};
-
-static const struct soc15_reg_entry sgpr2_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
-   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
 };
 
 static const struct soc15_reg_entry sec_ded_counter_registers[] = {

@@ -4006,9 +4020,15 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
+   { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
    { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1},
    { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1},
-   { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA2, 0, mmSDMA2_EDC_COUNTER), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA3, 0, mmSDMA3_EDC_COUNTER), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA4, 0, mmSDMA4_EDC_COUNTER), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA5, 0, mmSDMA5_EDC_COUNTER), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA6, 0, mmSDMA6_EDC_COUNTER), 0, 1, 1},
+   { SOC15_REG_ENTRY(SDMA7, 0, mmSDMA7_EDC_COUNTER), 0, 1, 1},
 };
 
 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)

@@ -4067,6 +4087,13 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	unsigned total_size, vgpr_offset, sgpr_offset;
 	u64 gpu_addr;
 
+	int compute_dim_x = adev->gfx.config.max_shader_engines *
+			    adev->gfx.config.max_cu_per_sh *
+			    adev->gfx.config.max_sh_per_se;
+	int sgpr_work_group_size = 5;
+	int gpr_reg_size = compute_dim_x / 16 + 6;
+	int sec_ded_counter_reg_size = adev->sdma.num_instances + 34;
+
 	/* only support when RAS is enabled */
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 		return 0;

@@ -4076,11 +4103,11 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		return 0;
 
 	total_size =
-		((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
 	total_size +=
-		((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4;
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
 	total_size +=
-		((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4;
+		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
 	total_size = ALIGN(total_size, 256);
 	vgpr_offset = total_size;
 	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);

@@ -4107,7 +4134,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* VGPR */
 	/* write the register state for the compute dispatch */
-	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
+	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
 								- PACKET3_SET_SH_REG_START;

@@ -4123,7 +4150,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = 0x40*2; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =

@@ -4135,7 +4162,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* SGPR1 */
 	/* write the register state for the compute dispatch */
-	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) {
+	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
 								- PACKET3_SET_SH_REG_START;

@@ -4151,7 +4178,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = 0xA0*2; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =

@@ -4163,7 +4190,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* SGPR2 */
 	/* write the register state for the compute dispatch */
-	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) {
+	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
 								- PACKET3_SET_SH_REG_START;

@@ -4179,7 +4206,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = 0xA0*2; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =

@@ -4205,7 +4232,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* read back registers to clear the counters */
 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
+	for (i = 0; i < sec_ded_counter_reg_size; i++) {
 		for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
 			for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
 				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
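With the register tables reordered so the per-SE COMPUTE_STATIC_THREAD_MGMT entries come last, the EDC workaround can program only a config-sized prefix of each array: gpr_reg_size = compute_dim_x / 16 + 6, where compute_dim_x = max_shader_engines * max_cu_per_sh * max_sh_per_se, and the dispatch width scales the same way. A small arithmetic check with made-up Vega20-like numbers; the config values below are illustrative, the formulas are the ones from the hunks above.

#include <stdio.h>

int main(void)
{
	/* illustrative config; the driver reads these from adev->gfx.config */
	int max_shader_engines = 4, max_cu_per_sh = 16, max_sh_per_se = 1;
	int sdma_instances = 2;

	int compute_dim_x = max_shader_engines * max_cu_per_sh * max_sh_per_se;
	int sgpr_work_group_size = 5;
	int gpr_reg_size = compute_dim_x / 16 + 6;
	int sec_ded_counter_reg_size = sdma_instances + 34;

	printf("VGPR dispatch x  = %d\n", compute_dim_x);             /* 64  */
	printf("SGPR dispatch x  = %d\n",
	       compute_dim_x / 2 * sgpr_work_group_size);             /* 160 */
	printf("gpr_reg_size     = %d\n", gpr_reg_size);              /* 10  */
	printf("sec_ded entries  = %d\n", sec_ded_counter_reg_size);  /* 36  */
	return 0;
}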
@@ -75,27 +75,31 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 
-	/* Program the system aperture low logical page number. */
-	WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-			 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
-	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
-		/*
-		 * Raven2 has a HW issue that it is unable to use the vram which
-		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
-		 * workaround that increase system aperture high address (add 1)
-		 * to get rid of the VM fault and hardware hang.
-		 */
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-				 max((adev->gmc.fb_end >> 18) + 0x1,
-				     adev->gmc.agp_end >> 18));
-	else
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-				 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
-	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
-		+ adev->vm_manager.vram_base_offset;
-	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
-		     (u32)(value >> 12));
-	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+	if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+		/* Program the system aperture low logical page number. */
+		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+		if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+			/*
+			 * Raven2 has a HW issue that it is unable to use the
+			 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+			 * So here is the workaround that increase system
+			 * aperture high address (add 1) to get rid of the VM
+			 * fault and hardware hang.
+			 */
+			WREG32_SOC15_RLC(GC, 0,
+					 mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+					 max((adev->gmc.fb_end >> 18) + 0x1,
+					     adev->gmc.agp_end >> 18));
+		else
+			WREG32_SOC15_RLC(
+				GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+		/* Set default page address. */
+		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+			adev->vm_manager.vram_base_offset;
+		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+			     (u32)(value >> 12));
+		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,

@@ -110,6 +114,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 		WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
 			       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
 	}
+	}
 
 static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
 {

@@ -264,7 +269,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 
 int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
 		/*
 		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
 		 * VF copy registers so vbios post doesn't program them, for

@@ -280,9 +285,11 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_aperture_regs(adev);
 	gfxhub_v1_0_init_system_aperture_regs(adev);
 	gfxhub_v1_0_init_tlb_regs(adev);
-	gfxhub_v1_0_init_cache_regs(adev);
+	if (!amdgpu_sriov_vf(adev))
+		gfxhub_v1_0_init_cache_regs(adev);
 
 	gfxhub_v1_0_enable_system_domain(adev);
-	gfxhub_v1_0_disable_identity_aperture(adev);
+	if (!amdgpu_sriov_vf(adev))
+		gfxhub_v1_0_disable_identity_aperture(adev);
 	gfxhub_v1_0_setup_vmid_config(adev);
 	gfxhub_v1_0_program_invalidation(adev);
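The Raven2 branch above keeps the workaround of pushing MC_VM_SYSTEM_APERTURE_HIGH_ADDR one 256KB granule past the FB end; the registers are programmed in units of 1 << 18 bytes, hence the >> 18. The computation, isolated as a small stand-alone helper; the helper and the sample values are only illustrative.

#include <stdint.h>
#include <stdio.h>

/* MC_VM_SYSTEM_APERTURE_* values are in 256KB units (byte address >> 18) */
static uint64_t aperture_high(uint64_t fb_end, uint64_t agp_end, int is_raven2)
{
	uint64_t high = fb_end >> 18;
	uint64_t agp  = agp_end >> 18;

	if (is_raven2)
		high += 1;	/* HW issue: VRAM beyond the aperture faults,
				 * so push the top one granule past the FB end */

	return high > agp ? high : agp;
}

int main(void)
{
	/* illustrative 8GB FB ending at 0x1_FFFF_FFFF, AGP aperture disabled */
	printf("0x%llx\n",
	       (unsigned long long)aperture_high(0x1FFFFFFFFULL, 0, 1));
	return 0;
}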
@@ -564,22 +564,11 @@ static int gmc_v10_0_early_init(void *handle)
 static int gmc_v10_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
-	unsigned i;
-
-	for(i = 0; i < adev->num_rings; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		unsigned vmhub = ring->funcs->vmhub;
-
-		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
-		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
-			 ring->idx, ring->name, ring->vm_inv_eng,
-			 ring->funcs->vmhub);
-	}
-
-	/* Engine 17 is used for GART flushes */
-	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-		BUG_ON(vm_inv_eng[i] > 17);
+	int r;
+
+	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
+	if (r)
+		return r;
 
 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
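gmc_v10_0_late_init (and gmc_v9_0_late_init further down) now delegate to a shared amdgpu_gmc_allocate_vm_inv_eng() instead of hand-assigning invalidation engines; the open-coded allocator removed from gmc_v9_0 later in this diff used an ffs() walk over a per-hub free-engine bitmap. A compact user-space model of that allocation, with made-up ring and hub structures:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAX_VMHUBS 3
#define NUM_RINGS  4

struct ring { const char *name; int vmhub; int vm_inv_eng; };

int main(void)
{
	/* engines 2/3 are reserved for firmware and 17 for GART flushes,
	 * so the free mask leaves those bits clear (0x1FFF3 in the driver) */
	unsigned int free_engs[MAX_VMHUBS] = { 0x1FFF3, 0x1FFF3, 0x1FFF3 };

	struct ring rings[NUM_RINGS] = {
		{ "gfx",     0, -1 }, { "comp.0",  0, -1 },
		{ "sdma0",   1, -1 }, { "vcn_dec", 1, -1 },
	};

	for (int i = 0; i < NUM_RINGS; i++) {
		int eng = ffs(free_engs[rings[i].vmhub]);

		if (!eng) {
			fprintf(stderr, "no VM inv eng for ring %s\n",
				rings[i].name);
			return 1;
		}
		rings[i].vm_inv_eng = eng - 1;
		free_engs[rings[i].vmhub] &= ~(1u << rings[i].vm_inv_eng);

		printf("ring %-7s uses VM inv eng %d on hub %d\n",
		       rings[i].name, rings[i].vm_inv_eng, rings[i].vmhub);
	}
	return 0;
}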
@@ -207,6 +207,11 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
 {
 	u32 bits, i, tmp, reg;
 
+	/* Devices newer then VEGA10/12 shall have these programming
+	     sequences performed by PSP BL */
+	if (adev->asic_type >= CHIP_VEGA20)
+		return 0;
+
 	bits = 0x7f;
 
 	switch (state) {

@@ -393,9 +398,11 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 	adev->gmc.vm_fault.num_types = 1;
 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 
-	adev->gmc.ecc_irq.num_types = 1;
-	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+	if (!amdgpu_sriov_vf(adev)) {
+		adev->gmc.ecc_irq.num_types = 1;
+		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
+	}
 }
 
 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 					    uint32_t flush_type)

@@ -790,36 +797,6 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
 	}
 }
 
-static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
-{
-	struct amdgpu_ring *ring;
-	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
-		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
-		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
-	unsigned i;
-	unsigned vmhub, inv_eng;
-
-	for (i = 0; i < adev->num_rings; ++i) {
-		ring = adev->rings[i];
-		vmhub = ring->funcs->vmhub;
-
-		inv_eng = ffs(vm_inv_engs[vmhub]);
-		if (!inv_eng) {
-			dev_err(adev->dev, "no VM inv eng for ring %s\n",
-				ring->name);
-			return -EINVAL;
-		}
-
-		ring->vm_inv_eng = inv_eng - 1;
-		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
-
-		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
-			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
-	}
-
-	return 0;
-}
-
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

@@ -828,7 +805,7 @@ static int gmc_v9_0_late_init(void *handle)
 	if (!gmc_v9_0_keep_stolen_memory(adev))
 		amdgpu_bo_late_init(adev);
 
-	r = gmc_v9_0_allocate_vm_inv_eng(adev);
+	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
 	if (r)
 		return r;
 	/* Check if ecc is available */

@@ -1112,11 +1089,13 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	/* interrupt sent to DF. */
-	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
-			      &adev->gmc.ecc_irq);
-	if (r)
-		return r;
+	if (!amdgpu_sriov_vf(adev)) {
+		/* interrupt sent to DF. */
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+				      &adev->gmc.ecc_irq);
+		if (r)
+			return r;
+	}
 
 	/* Set the internal MC address mask
 	 * This is the max address of the GPU's

@@ -1302,12 +1281,13 @@ static int gmc_v9_0_hw_init(void *handle)
 	else
 		value = true;
 
-	gfxhub_v1_0_set_fault_enable_default(adev, value);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_set_fault_enable_default(adev, value);
-	else
-		mmhub_v1_0_set_fault_enable_default(adev, value);
+	if (!amdgpu_sriov_vf(adev)) {
+		gfxhub_v1_0_set_fault_enable_default(adev, value);
+		if (adev->asic_type == CHIP_ARCTURUS)
+			mmhub_v9_4_set_fault_enable_default(adev, value);
+		else
+			mmhub_v1_0_set_fault_enable_default(adev, value);
+	}
 	for (i = 0; i < adev->num_vmhubs; ++i)
 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
@@ -24,16 +24,6 @@
 #ifndef __GMC_V9_0_H__
 #define __GMC_V9_0_H__
 
-/*
- * The latest engine allocation on gfx9 is:
- * Engine 2, 3: firmware
- * Engine 0, 1, 4~16: amdgpu ring,
- * subject to change when ring number changes
- * Engine 17: Gart flushes
- */
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
-
 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
 #endif
@@ -25,6 +25,7 @@
 #include "amdgpu_jpeg.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "vcn_v1_0.h"
 
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"

@@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
 	.insert_start = jpeg_v1_0_decode_ring_insert_start,
 	.insert_end = jpeg_v1_0_decode_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
+	.begin_use = vcn_v1_0_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
 	.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
@@ -128,45 +128,53 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
 			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			    adev->gmc.agp_start >> 24);
 
+	if (!amdgpu_sriov_vf(adev)) {
 		/* Program the system aperture low logical page number. */
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-	WREG32_SOC15_OFFSET(MMHUB, 0,
-			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
 		/* Set default page address. */
 		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
 			adev->vm_manager.vram_base_offset;
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)(value >> 12));
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)(value >> 44));
 
 		/* Program "protection fault". */
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)(adev->dummy_page_addr >> 12));
-	WREG32_SOC15_OFFSET(MMHUB, 0,
+		WREG32_SOC15_OFFSET(
+			MMHUB, 0,
 			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
 			(u32)((u64)adev->dummy_page_addr >> 44));
 
-	tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
-			mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+		tmp = RREG32_SOC15_OFFSET(
+			MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
 			hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
 		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
 				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
-	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
-			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
+		WREG32_SOC15_OFFSET(MMHUB, 0,
+				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
+				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
+				    tmp);
+	}
 }
 
 static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)

@@ -368,29 +376,15 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
-		if (amdgpu_sriov_vf(adev)) {
-			/*
-			 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase
-			 * they are VF copy registers so vbios post doesn't
-			 * program them, for SRIOV driver need to program them
-			 */
-			WREG32_SOC15_OFFSET(MMHUB, 0,
-				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE,
-				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
-				     adev->gmc.vram_start >> 24);
-			WREG32_SOC15_OFFSET(MMHUB, 0,
-				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP,
-				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
-				     adev->gmc.vram_end >> 24);
-		}
-
 		/* GART Enable. */
 		mmhub_v9_4_init_gart_aperture_regs(adev, i);
 		mmhub_v9_4_init_system_aperture_regs(adev, i);
 		mmhub_v9_4_init_tlb_regs(adev, i);
-		mmhub_v9_4_init_cache_regs(adev, i);
+		if (!amdgpu_sriov_vf(adev))
+			mmhub_v9_4_init_cache_regs(adev, i);
 
 		mmhub_v9_4_enable_system_domain(adev, i);
-		mmhub_v9_4_disable_identity_aperture(adev, i);
+		if (!amdgpu_sriov_vf(adev))
+			mmhub_v9_4_disable_identity_aperture(adev, i);
 		mmhub_v9_4_setup_vmid_config(adev, i);
 		mmhub_v9_4_program_invalidation(adev, i);
@@ -47,6 +47,18 @@ struct mmsch_v1_0_init_header {
 	uint32_t uvd_table_size;
 };
 
+struct mmsch_vf_eng_init_header {
+	uint32_t init_status;
+	uint32_t table_offset;
+	uint32_t table_size;
+};
+
+struct mmsch_v1_1_init_header {
+	uint32_t version;
+	uint32_t total_size;
+	struct mmsch_vf_eng_init_header eng[2];
+};
+
 struct mmsch_v1_0_cmd_direct_reg_header {
 	uint32_t reg_offset   : 28;
 	uint32_t command_type : 4;
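The new mmsch_v1_1_init_header describes each engine with an init_status/table_offset/table_size triplet, so the SR-IOV init table can cover both VCN instances on Arcturus uniformly. One hypothetical way such a header could be filled in (offsets in dwords; this is only an illustration of the layout, not the actual MMSCH host contract):

#include <stdint.h>
#include <stdio.h>

struct mmsch_vf_eng_init_header {
	uint32_t init_status;
	uint32_t table_offset;
	uint32_t table_size;
};

struct mmsch_v1_1_init_header {
	uint32_t version;
	uint32_t total_size;
	struct mmsch_vf_eng_init_header eng[2];
};

int main(void)
{
	struct mmsch_v1_1_init_header hdr = { .version = 0x00010001 };
	uint32_t header_dw = sizeof(hdr) / 4;
	uint32_t eng_table_dw[2] = { 96, 96 };	/* made-up per-engine sizes */

	hdr.eng[0].table_offset = header_dw;
	hdr.eng[0].table_size   = eng_table_dw[0];
	hdr.eng[1].table_offset = header_dw + eng_table_dw[0];
	hdr.eng[1].table_size   = eng_table_dw[1];
	hdr.total_size = header_dw + eng_table_dw[0] + eng_table_dw[1];

	for (int i = 0; i < 2; i++)
		printf("eng%d: offset %u dw, size %u dw\n",
		       i, hdr.eng[i].table_offset, hdr.eng[i].table_size);
	printf("total %u dw\n", hdr.total_size);
	return 0;
}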
@@ -250,7 +250,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 	 */
 	locked = mutex_trylock(&adev->lock_reset);
 	if (locked)
-		adev->in_gpu_reset = 1;
+		adev->in_gpu_reset = true;
 
 	do {
 		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)

@@ -262,7 +262,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	if (locked) {
-		adev->in_gpu_reset = 0;
+		adev->in_gpu_reset = false;
 		mutex_unlock(&adev->lock_reset);
 	}

@@ -252,7 +252,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 	 */
 	locked = mutex_trylock(&adev->lock_reset);
 	if (locked)
-		adev->in_gpu_reset = 1;
+		adev->in_gpu_reset = true;
 
 	do {
 		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)

@@ -264,12 +264,16 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
 	if (locked) {
-		adev->in_gpu_reset = 0;
+		adev->in_gpu_reset = false;
 		mutex_unlock(&adev->lock_reset);
 	}
 
 	/* Trigger recovery for world switch failure if no TDR */
-	if (amdgpu_device_should_recover_gpu(adev))
+	if (amdgpu_device_should_recover_gpu(adev)
+		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+		    adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
+		    adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
+		    adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
 		amdgpu_device_gpu_recover(adev, NULL);
 }
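The xgpu_nv FLR handler above now only schedules a full GPU recovery when at least one engine still runs with MAX_SCHEDULE_TIMEOUT, that is, when at least one engine has no TDR of its own to fall back on. The check, isolated with plain fields; MAX_SCHEDULE_TIMEOUT here stands in for the kernel's value and the struct is illustrative.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

struct dev_timeouts {
	long sdma_timeout, gfx_timeout, compute_timeout, video_timeout;
};

/* true when some engine has no per-engine TDR timeout, so the FLR handler
 * must kick full recovery itself instead of relying on the hung-job path */
static bool flr_should_recover(const struct dev_timeouts *t)
{
	return t->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
	       t->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
	       t->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
	       t->video_timeout == MAX_SCHEDULE_TIMEOUT;
}

int main(void)
{
	struct dev_timeouts tdr_all = { 10000, 10000, 60000, 10000 };
	struct dev_timeouts no_sdma_tdr = { MAX_SCHEDULE_TIMEOUT, 10000, 60000, 10000 };

	printf("%d %d\n", flr_should_recover(&tdr_all),      /* 0 */
			  flr_should_recover(&no_sdma_tdr)); /* 1 */
	return 0;
}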
@@ -110,7 +110,6 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
 static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ih_ring *ih = &adev->irq.ih;
-	int ret = 0;
 	u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
 	u32 tmp;
 

@@ -179,7 +178,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 	/* enable interrupts */
 	navi10_ih_enable_interrupts(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -339,7 +339,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 		/* ras_controller_int is dedicated for nbif ras error,
 		 * not the global interrupt for sync flood
 		 */
-		amdgpu_ras_reset_gpu(adev, true);
+		amdgpu_ras_reset_gpu(adev);
 	}
 }
 

@@ -456,10 +456,8 @@ static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev)
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
 			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
 			      &adev->nbio.ras_controller_irq);
-	if (r)
-		return r;
 
-	return 0;
+	return r;
 }
 
 static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)

@@ -476,10 +474,8 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
 			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
 			      &adev->nbio.ras_err_event_athub_irq);
-	if (r)
-		return r;
 
-	return 0;
+	return r;
 }
 
 #define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030
@@ -242,6 +242,7 @@ enum psp_gfx_fw_type {
 	GFX_FW_TYPE_SDMA5 = 55,   /* SDMA5 MI */
 	GFX_FW_TYPE_SDMA6 = 56,   /* SDMA6 MI */
 	GFX_FW_TYPE_SDMA7 = 57,   /* SDMA7 MI */
+	GFX_FW_TYPE_VCN1  = 58,   /* VCN1 MI  */
 	GFX_FW_TYPE_MAX
 };
 
@@ -233,6 +233,29 @@ out:
 	return err;
 }
 
+int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	int ret;
+	int retry_loop;
+
+	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+		/* Wait for bootloader to signify that is
+		    ready having bit 31 of C2PMSG_35 set to 1 */
+		ret = psp_wait_for(psp,
+				   SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+				   0x80000000,
+				   0x80000000,
+				   false);
+
+		if (ret == 0)
+			return 0;
+	}
+
+	return ret;
+}
+
 static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;

@@ -258,9 +281,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
 		return 0;
 	}
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 

@@ -276,9 +297,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
 	       psp_gfxdrv_command_reg);
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 
 	return ret;
 }

@@ -298,9 +317,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 		return 0;
 	}
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 

@@ -319,8 +336,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
 
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 
 	return ret;
 }

@@ -337,9 +353,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
 	if (psp_v11_0_is_sos_alive(psp))
 		return 0;
 
-	/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
-	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
-			   0x80000000, 0x80000000, false);
+	ret = psp_v11_0_wait_for_bootloader(psp);
 	if (ret)
 		return ret;
 
@@ -255,7 +255,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));

@@ -750,7 +750,7 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =

@@ -1260,16 +1260,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
@@ -429,7 +429,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));

@@ -1021,7 +1021,7 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =

@@ -1698,16 +1698,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
@@ -698,7 +698,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	/* IB packet must end on a 8 DW boundary */
-	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));

@@ -1579,7 +1579,7 @@ static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
 	u32 pad_count;
 	int i;
 
-	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	pad_count = (-ib->length_dw) & 7;
 	for (i = 0; i < pad_count; i++)
 		if (sdma && sdma->burst_nop && (i == 0))
 			ib->ptr[ib->length_dw++] =

@@ -2409,10 +2409,9 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 			sched = &adev->sdma.instance[i].page.sched;
 		else
 			sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] = sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
@@ -382,8 +382,15 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
-	/* IB packet must end on a 8 DW boundary */
-	sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+	/* An IB packet must end on a 8 DW boundary--the next dword
+	 * must be on a 8-dword boundary. Our IB packet below is 6
+	 * dwords long, thus add x number of NOPs, such that, in
+	 * modular arithmetic,
+	 * wptr + 6 + x = 8k, k >= 0, which in C is,
+	 * (wptr + 6 + x) % 8 = 0.
+	 * The expression below, is a solution of x.
+	 */
+	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
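The comment added above pins the arithmetic down: with a 6-dword INDIRECT_BUFFER packet, the NOP count x must satisfy (wptr + x + 6) % 8 == 0, which (2 - wptr) & 7 solves, and the same simplification turns the old pad_count expression (8 - (len & 7)) % 8 into (-len) & 7. A quick exhaustive check of both identities:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint32_t wptr = 0; wptr < 64; wptr++) {
		uint32_t nops_old = (10 - (wptr & 7)) % 8;	/* previous formula */
		uint32_t nops_new = (2 - wptr) & 7;		/* new formula */

		assert(nops_old == nops_new);
		/* the 6-dword IB packet then ends on an 8-DW boundary */
		assert((wptr + nops_new + 6) % 8 == 0);
	}

	for (uint32_t len = 0; len < 64; len++) {
		uint32_t pad_old = (8 - (len & 0x7)) % 8;
		uint32_t pad_new = (-len) & 7;

		assert(pad_old == pad_new);
		assert((len + pad_new) % 8 == 0);	/* padded to a multiple of 8 */
	}

	printf("padding identities hold\n");
	return 0;
}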
@ -907,16 +914,9 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
|
||||||
udelay(1);
|
udelay(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < adev->usec_timeout) {
|
if (i >= adev->usec_timeout)
|
||||||
if (amdgpu_emu_mode == 1)
|
r = -ETIMEDOUT;
|
||||||
DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
|
|
||||||
else
|
|
||||||
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
|
|
||||||
} else {
|
|
||||||
DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
|
|
||||||
ring->idx, tmp);
|
|
||||||
r = -EINVAL;
|
|
||||||
}
|
|
||||||
amdgpu_device_wb_free(adev, index);
|
amdgpu_device_wb_free(adev, index);
|
||||||
|
|
||||||
return r;
|
return r;
|
||||||
|
@ -981,13 +981,10 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||||
goto err1;
|
goto err1;
|
||||||
}
|
}
|
||||||
tmp = le32_to_cpu(adev->wb.wb[index]);
|
tmp = le32_to_cpu(adev->wb.wb[index]);
|
||||||
if (tmp == 0xDEADBEEF) {
|
if (tmp == 0xDEADBEEF)
|
||||||
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
|
|
||||||
r = 0;
|
r = 0;
|
||||||
} else {
|
else
|
||||||
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
|
|
||||||
r = -EINVAL;
|
r = -EINVAL;
|
||||||
}
|
|
||||||
|
|
||||||
err1:
|
err1:
|
||||||
amdgpu_ib_free(adev, &ib, NULL);
|
amdgpu_ib_free(adev, &ib, NULL);
|
||||||
|
@ -1086,10 +1083,10 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
|
* sdma_v5_0_ring_pad_ib - pad the IB
|
||||||
*
|
|
||||||
* @ib: indirect buffer to fill with padding
|
* @ib: indirect buffer to fill with padding
|
||||||
*
|
*
|
||||||
|
* Pad the IB with NOPs to a boundary multiple of 8.
|
||||||
*/
|
*/
|
||||||
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
|
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
|
||||||
{
|
{
|
||||||
|
@ -1097,7 +1094,7 @@ static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
|
||||||
u32 pad_count;
|
u32 pad_count;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
pad_count = (8 - (ib->length_dw & 0x7)) % 8;
|
pad_count = (-ib->length_dw) & 0x7;
|
||||||
for (i = 0; i < pad_count; i++)
|
for (i = 0; i < pad_count; i++)
|
||||||
if (sdma && sdma->burst_nop && (i == 0))
|
if (sdma && sdma->burst_nop && (i == 0))
|
||||||
ib->ptr[ib->length_dw++] =
|
ib->ptr[ib->length_dw++] =
|
||||||
|
@ -1721,17 +1718,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
|
||||||
|
|
||||||
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
struct drm_gpu_scheduler *sched;
|
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
|
||||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||||
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
|
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
|
||||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||||
sched = &adev->sdma.instance[i].ring.sched;
|
adev->vm_manager.vm_pte_scheds[i] =
|
||||||
adev->vm_manager.vm_pte_rqs[i] =
|
&adev->sdma.instance[i].ring.sched;
|
||||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
|
||||||
}
|
}
|
||||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
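The two padding rewrites above trade a subtract-and-modulo for a plain mask. A small standalone check, not part of the patch and with illustrative names only, confirms the new expressions agree with the old ones for all values modulo 8:

#include <assert.h>
#include <stdint.h>

/* Standalone sanity check (illustrative, not from the patch): the
 * rewritten SDMA NOP-padding expressions match the ones they replace
 * and still land the packet / IB on an 8-dword boundary.
 */
int main(void)
{
	uint32_t n;

	for (n = 0; n < 64; n++) {
		/* ring_emit_ib: pad so the 6-dword INDIRECT packet ends 8-dword aligned */
		assert(((2 - n) & 7) == (10 - (n & 7)) % 8);
		assert(((n + ((2 - n) & 7) + 6) & 7) == 0);

		/* ring_pad_ib: pad ib->length_dw up to a multiple of 8 */
		assert(((0u - n) & 7) == (8 - (n & 7)) % 8);
		assert(((n + ((0u - n) & 7)) & 7) == 0);
	}
	return 0;
}
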
@@ -834,16 +834,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {

 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-struct drm_gpu_scheduler *sched;
 unsigned i;

 adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
 for (i = 0; i < adev->sdma.num_instances; i++) {
-sched = &adev->sdma.instance[i].ring.sched;
-adev->vm_manager.vm_pte_rqs[i] =
-&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+adev->vm_manager.vm_pte_scheds[i] =
+&adev->sdma.instance[i].ring.sched;
 }
-adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }

 const struct amdgpu_ip_block_version si_dma_ip_block =

@@ -613,6 +613,7 @@ static bool soc15_supports_baco(struct amdgpu_device *adev)
 switch (adev->asic_type) {
 case CHIP_VEGA10:
 case CHIP_VEGA12:
+case CHIP_ARCTURUS:
 soc15_asic_get_baco_capability(adev, &baco_support);
 break;
 case CHIP_VEGA20:
@@ -827,11 +828,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-if (!amdgpu_sriov_vf(adev))
 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

+if (amdgpu_sriov_vf(adev)) {
+if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+} else {
 if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+}
 if (!amdgpu_sriov_vf(adev))
 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
 break;

@@ -28,16 +28,11 @@
 #include "rsmu/rsmu_0_0_2_sh_mask.h"
 #include "umc/umc_6_1_1_offset.h"
 #include "umc/umc_6_1_1_sh_mask.h"
+#include "umc/umc_6_1_2_offset.h"

 #define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10

-/* UMC 6_1_2 register offsets */
-#define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360
-#define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1
-#define mmUMCCH0_0_EccErrCnt_ARCT 0x0361
-#define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1
-#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2
-#define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1
+#define UMC_6_INST_DIST 0x40000

 /*
 * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
@@ -49,6 +44,10 @@
 /* offset in 256B block */
 #define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)

+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))

 const uint32_t
 umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
 {2, 18, 11, 27}, {4, 20, 13, 29},
@@ -57,41 +56,17 @@ const uint32_t
 {9, 25, 0, 16}, {15, 31, 6, 22}
 };

-static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev,
-uint32_t umc_instance)
-{
-uint32_t rsmu_umc_index;

-rsmu_umc_index = RREG32_SOC15(RSMU, 0,
-mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
-rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-RSMU_UMC_INDEX_MODE_EN, 1);
-rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-RSMU_UMC_INDEX_INSTANCE, umc_instance);
-rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index,
-RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-RSMU_UMC_INDEX_WREN, 1 << umc_instance);
-WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-rsmu_umc_index);
-}

 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
 {
 WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
 RSMU_UMC_INDEX_MODE_EN, 0);
 }

-static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
+uint32_t umc_inst,
+uint32_t ch_inst)
 {
-uint32_t rsmu_umc_index;

-rsmu_umc_index = RREG32_SOC15(RSMU, 0,
-mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
-return REG_GET_FIELD(rsmu_umc_index,
-RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
-RSMU_UMC_INDEX_INSTANCE);
+return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
 }

 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
@@ -105,7 +80,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,

 if (adev->asic_type == CHIP_ARCTURUS) {
 /* UMC 6_1_2 registers */

 ecc_err_cnt_sel_addr =
 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
 ecc_err_cnt_addr =
@@ -114,7 +88,6 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
 } else {
 /* UMC 6_1_1 registers */

 ecc_err_cnt_sel_addr =
 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
 ecc_err_cnt_addr =
@@ -124,31 +97,31 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
 }

 /* select the lower chip and check the error count */
-ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 EccErrCntCsSel, 0);
-WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 *error_count +=
 (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 UMC_V6_1_CE_CNT_INIT);
 /* clear the lower chip err count */
-WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

 /* select the higher chip and check the err counter */
 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 EccErrCntCsSel, 1);
-WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset);
+WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
 *error_count +=
 (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
 UMC_V6_1_CE_CNT_INIT);
 /* clear the higher chip err count */
-WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

 /* check for SRAM correctable error
 MCUMC_STATUS is a 64 bit register */
-mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -164,18 +137,16 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev,

 if (adev->asic_type == CHIP_ARCTURUS) {
 /* UMC 6_1_2 registers */

 mc_umc_status_addr =
 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
 } else {
 /* UMC 6_1_1 registers */

 mc_umc_status_addr =
 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
 }

 /* check the MCUMC_STATUS */
-mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -185,38 +156,46 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev,
 *error_count += 1;
 }

-static void umc_v6_1_query_error_count(struct amdgpu_device *adev,
-struct ras_err_data *err_data, uint32_t umc_reg_offset,
-uint32_t channel_index)
-{
-umc_v6_1_query_correctable_error_count(adev, umc_reg_offset,
-&(err_data->ce_count));
-umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset,
-&(err_data->ue_count));
-}

 static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 void *ras_error_status)
 {
-amdgpu_umc_for_each_channel(umc_v6_1_query_error_count);
+struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+uint32_t umc_inst = 0;
+uint32_t ch_inst = 0;
+uint32_t umc_reg_offset = 0;
+
+LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+umc_reg_offset = get_umc_6_reg_offset(adev,
+umc_inst,
+ch_inst);
+
+umc_v6_1_query_correctable_error_count(adev,
+umc_reg_offset,
+&(err_data->ce_count));
+umc_v6_1_querry_uncorrectable_error_count(adev,
+umc_reg_offset,
+&(err_data->ue_count));
+}
 }

 static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 struct ras_err_data *err_data,
-uint32_t umc_reg_offset, uint32_t channel_index)
+uint32_t umc_reg_offset,
+uint32_t ch_inst,
+uint32_t umc_inst)
 {
 uint32_t lsb, mc_umc_status_addr;
 uint64_t mc_umc_status, err_addr, retired_page;
 struct eeprom_table_record *err_rec;
+uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];

 if (adev->asic_type == CHIP_ARCTURUS) {
 /* UMC 6_1_2 registers */

 mc_umc_status_addr =
 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT);
 } else {
 /* UMC 6_1_1 registers */

 mc_umc_status_addr =
 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
 }
@@ -224,12 +203,12 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 /* skip error address process if -ENOMEM */
 if (!err_data->err_addr) {
 /* clear umc status */
-WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 return;
 }

 err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
+mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

 /* calculate error address if ue/ce error is detected */
 if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -257,39 +236,53 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
 err_rec->cu = 0;
 err_rec->mem_channel = channel_index;
-err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+err_rec->mcumc_id = umc_inst;

 err_data->err_addr_cnt++;
 }
 }

 /* clear umc status */
-WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL);
+WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 }

 static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
 void *ras_error_status)
 {
-amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
+struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+uint32_t umc_inst = 0;
+uint32_t ch_inst = 0;
+uint32_t umc_reg_offset = 0;
+
+LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+umc_reg_offset = get_umc_6_reg_offset(adev,
+umc_inst,
+ch_inst);
+
+umc_v6_1_query_error_address(adev,
+err_data,
+umc_reg_offset,
+ch_inst,
+umc_inst);
+}
 }

 static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
-struct ras_err_data *err_data,
-uint32_t umc_reg_offset, uint32_t channel_index)
+uint32_t umc_reg_offset)
 {
 uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
 uint32_t ecc_err_cnt_addr;

 if (adev->asic_type == CHIP_ARCTURUS) {
 /* UMC 6_1_2 registers */

 ecc_err_cnt_sel_addr =
 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT);
 ecc_err_cnt_addr =
 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT);
 } else {
 /* UMC 6_1_1 registers */

 ecc_err_cnt_sel_addr =
 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel);
 ecc_err_cnt_addr =
@@ -297,28 +290,38 @@ static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
 }

 /* select the lower chip and check the error count */
-ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset);
+ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 EccErrCntCsSel, 0);
 /* set ce error interrupt type to APIC based interrupt */
 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 EccErrInt, 0x1);
-WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
+WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
 /* set error count to initial value */
-WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);

 /* select the higher chip and check the err counter */
 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
 EccErrCntCsSel, 1);
-WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel);
-WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
+WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
 }

 static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
 {
-void *ras_error_status = NULL;
+uint32_t umc_inst = 0;
+uint32_t ch_inst = 0;
+uint32_t umc_reg_offset = 0;

-amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
+umc_v6_1_disable_umc_index_mode(adev);
+
+LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+umc_reg_offset = get_umc_6_reg_offset(adev,
+umc_inst,
+ch_inst);
+
+umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset);
+}
 }

 const struct amdgpu_umc_funcs umc_v6_1_funcs = {
@@ -326,6 +329,4 @@ const struct amdgpu_umc_funcs umc_v6_1_funcs = {
 .ras_late_init = amdgpu_umc_ras_late_init,
 .query_ras_error_count = umc_v6_1_query_ras_error_count,
 .query_ras_error_address = umc_v6_1_query_ras_error_address,
-.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
-.disable_umc_index_mode = umc_v6_1_disable_umc_index_mode,
 };

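As a side note on the RAS rework above: the old per-channel callback plus RSMU index-mode selection is replaced by explicit LOOP_UMC_INST_AND_CH loops over a flat register offset. A toy sketch of how that offset is derived, with assumed example values standing in for the adev->umc fields (they are not taken from this patch):

#include <stdio.h>
#include <stdint.h>

#define UMC_6_INST_DIST		0x40000
/* Assumed example values; the real ones come from the ASIC-specific
 * UMC setup (adev->umc.channel_offs etc.), not from this patch.
 */
#define EX_CHANNEL_OFFS		0x800
#define EX_UMC_INST_NUM		8
#define EX_CHANNEL_INST_NUM	4

/* Same formula as get_umc_6_reg_offset() in the patch. */
static uint32_t umc6_reg_offset(uint32_t umc_inst, uint32_t ch_inst)
{
	return EX_CHANNEL_OFFS * ch_inst + UMC_6_INST_DIST * umc_inst;
}

int main(void)
{
	uint32_t umc_inst, ch_inst;

	/* Same iteration shape as LOOP_UMC_INST_AND_CH(umc_inst, ch_inst). */
	for (umc_inst = 0; umc_inst < EX_UMC_INST_NUM; umc_inst++)
		for (ch_inst = 0; ch_inst < EX_CHANNEL_INST_NUM; ch_inst++)
			printf("umc %u ch %u -> reg offset 0x%x\n",
			       umc_inst, ch_inst,
			       umc6_reg_offset(umc_inst, ch_inst));
	return 0;
}
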
@@ -25,6 +25,7 @@

 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -51,6 +52,8 @@ static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_sta
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 struct dpg_pause_state *new_state);

+static void vcn_v1_0_idle_work_handler(struct work_struct *work);

 /**
 * vcn_v1_0_early_init - set function pointers
 *
@@ -105,6 +108,9 @@ static int vcn_v1_0_sw_init(void *handle)
 if (r)
 return r;

+/* Override the work func */
+adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 const struct common_firmware_header *hdr;
 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -1758,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
 return ret;
 }

+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+struct amdgpu_device *adev =
+container_of(work, struct amdgpu_device, vcn.idle_work.work);
+unsigned int fences = 0, i;
+
+for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+struct dpg_pause_state new_state;
+
+if (fences)
+new_state.fw_based = VCN_DPG_STATE__PAUSE;
+else
+new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+new_state.jpeg = VCN_DPG_STATE__PAUSE;
+else
+new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+adev->vcn.pause_dpg_mode(adev, &new_state);
+}
+
+fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+if (fences == 0) {
+amdgpu_gfx_off_ctrl(adev, true);
+if (adev->pm.dpm_enabled)
+amdgpu_dpm_enable_uvd(adev, false);
+else
+amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+AMD_PG_STATE_GATE);
+} else {
+schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+}
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+struct amdgpu_device *adev = ring->adev;
+bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+if (set_clocks) {
+amdgpu_gfx_off_ctrl(adev, false);
+if (adev->pm.dpm_enabled)
+amdgpu_dpm_enable_uvd(adev, true);
+else
+amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+AMD_PG_STATE_UNGATE);
+}
+
+if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+struct dpg_pause_state new_state;
+unsigned int fences = 0, i;
+
+for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+if (fences)
+new_state.fw_based = VCN_DPG_STATE__PAUSE;
+else
+new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+new_state.jpeg = VCN_DPG_STATE__PAUSE;
+else
+new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+new_state.fw_based = VCN_DPG_STATE__PAUSE;
+else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+adev->vcn.pause_dpg_mode(adev, &new_state);
+}
+}

 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 .name = "vcn_v1_0",
 .early_init = vcn_v1_0_early_init,
@@ -1804,7 +1890,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 .insert_start = vcn_v1_0_dec_ring_insert_start,
 .insert_end = vcn_v1_0_dec_ring_insert_end,
 .pad_ib = amdgpu_ring_generic_pad_ib,
-.begin_use = amdgpu_vcn_ring_begin_use,
+.begin_use = vcn_v1_0_ring_begin_use,
 .end_use = amdgpu_vcn_ring_end_use,
 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -1836,7 +1922,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 .insert_nop = amdgpu_ring_insert_nop,
 .insert_end = vcn_v1_0_enc_ring_insert_end,
 .pad_ib = amdgpu_ring_generic_pad_ib,
-.begin_use = amdgpu_vcn_ring_begin_use,
+.begin_use = vcn_v1_0_ring_begin_use,
 .end_use = amdgpu_vcn_ring_end_use,
 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,

@@ -24,6 +24,8 @@
 #ifndef __VCN_V1_0_H__
 #define __VCN_V1_0_H__

+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

 extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;

 #endif

@@ -29,6 +29,7 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "vcn_v2_0.h"
+#include "mmsch_v1_0.h"

 #include "vcn/vcn_2_5_offset.h"
 #include "vcn/vcn_2_5_sh_mask.h"
@@ -54,6 +55,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
 static int vcn_v2_5_set_powergating_state(void *handle,
 enum amd_powergating_state state);
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);

 static int amdgpu_ih_clientid_vcns[] = {
 SOC15_IH_CLIENTID_VCN,
@@ -88,7 +90,13 @@ static int vcn_v2_5_early_init(void *handle)
 } else
 adev->vcn.num_vcn_inst = 1;

+if (amdgpu_sriov_vf(adev)) {
+adev->vcn.num_vcn_inst = 2;
+adev->vcn.harvest_config = 0;
+adev->vcn.num_enc_rings = 1;
+} else {
 adev->vcn.num_enc_rings = 2;
+}

 vcn_v2_5_set_dec_ring_funcs(adev);
 vcn_v2_5_set_enc_ring_funcs(adev);
@@ -176,7 +184,9 @@ static int vcn_v2_5_sw_init(void *handle)

 ring = &adev->vcn.inst[j].ring_dec;
 ring->use_doorbell = true;
-ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
+ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
 sprintf(ring->name, "vcn_dec_%d", j);
 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
 if (r)
@@ -185,7 +195,10 @@ static int vcn_v2_5_sw_init(void *handle)
 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 ring = &adev->vcn.inst[j].ring_enc[i];
 ring->use_doorbell = true;
-ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
+ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

 sprintf(ring->name, "vcn_enc_%d.%d", j, i);
 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
 if (r)
@@ -193,6 +206,12 @@ static int vcn_v2_5_sw_init(void *handle)
 }
 }

+if (amdgpu_sriov_vf(adev)) {
+r = amdgpu_virt_alloc_mm_table(adev);
+if (r)
+return r;
+}

 return 0;
 }

@@ -208,6 +227,9 @@ static int vcn_v2_5_sw_fini(void *handle)
 int r;
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+if (amdgpu_sriov_vf(adev))
+amdgpu_virt_free_mm_table(adev);

 r = amdgpu_vcn_suspend(adev);
 if (r)
 return r;
@@ -228,11 +250,22 @@ static int vcn_v2_5_hw_init(void *handle)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 struct amdgpu_ring *ring;
-int i, j, r;
+int i, j, r = 0;

+if (amdgpu_sriov_vf(adev))
+r = vcn_v2_5_sriov_start(adev);

 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 if (adev->vcn.harvest_config & (1 << j))
 continue;

+if (amdgpu_sriov_vf(adev)) {
+adev->vcn.inst[j].ring_enc[0].sched.ready = true;
+adev->vcn.inst[j].ring_enc[1].sched.ready = false;
+adev->vcn.inst[j].ring_enc[2].sched.ready = false;
+adev->vcn.inst[j].ring_dec.sched.ready = true;
+} else {

 ring = &adev->vcn.inst[j].ring_dec;

 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -249,6 +282,7 @@ static int vcn_v2_5_hw_init(void *handle)
 goto done;
 }
 }
+}

 done:
 if (!r)
@@ -741,6 +775,204 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 return 0;
 }

+static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
+struct amdgpu_mm_table *table)
+{
+uint32_t data = 0, loop = 0, size = 0;
+uint64_t addr = table->gpu_addr;
+struct mmsch_v1_1_init_header *header = NULL;;
+
+header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
+size = header->total_size;
+
+/*
+ * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
+ * memory descriptor location
+ */
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+/* 2, update vmid of descriptor */
+data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+/* use domain0 for MM scheduler */
+data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+/* 3, notify mmsch about the size of this descriptor */
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+/* 4, set resp to zero */
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+/*
+ * 5, kick off the initialization and wait until
+ * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+loop = 10;
+while ((data & 0x10000002) != 0x10000002) {
+udelay(100);
+data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+loop--;
+if (!loop)
+break;
+}
+
+if (!loop) {
+dev_err(adev->dev,
+"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
+data);
+return -EBUSY;
+}
+
+return 0;
+}
+
+static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
+{
+struct amdgpu_ring *ring;
+uint32_t offset, size, tmp, i, rb_bufsz;
+uint32_t table_size = 0;
+struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
+struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
+struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
+struct mmsch_v1_0_cmd_end end = { { 0 } };
+uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
+
+direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+header->version = MMSCH_VERSION;
+header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
+init_table += header->total_size;
+
+for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+header->eng[i].table_offset = header->total_size;
+header->eng[i].init_status = 0;
+header->eng[i].table_size = 0;
+
+table_size = 0;
+
+MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+/* mc resume*/
+if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+offset = 0;
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+} else {
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+lower_32_bits(adev->vcn.inst[i].gpu_addr));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+upper_32_bits(adev->vcn.inst[i].gpu_addr));
+offset = size;
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+}
+
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+size);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+0);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+AMDGPU_VCN_STACK_SIZE);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+AMDGPU_VCN_STACK_SIZE));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
+AMDGPU_VCN_STACK_SIZE));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+0);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+AMDGPU_VCN_CONTEXT_SIZE);
+
+ring = &adev->vcn.inst[i].ring_enc[0];
+ring->wptr = 0;
+
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+lower_32_bits(ring->gpu_addr));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+upper_32_bits(ring->gpu_addr));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ring->ring_size / 4);
+
+ring = &adev->vcn.inst[i].ring_dec;
+ring->wptr = 0;
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+lower_32_bits(ring->gpu_addr));
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i,
+mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+upper_32_bits(ring->gpu_addr));
+
+/* force RBC into idle state */
+rb_bufsz = order_base_2(ring->ring_size);
+tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+MMSCH_V1_0_INSERT_DIRECT_WT(
+SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+/* add end packet */
+memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+
+/* refine header */
+header->eng[i].table_size = table_size;
+header->total_size += table_size;
+}
+
+return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
+}

 static int vcn_v2_5_stop(struct amdgpu_device *adev)
 {
 uint32_t tmp;
@@ -1048,6 +1280,9 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

+if (amdgpu_sriov_vf(adev))
+return 0;

 if (enable) {
 if (vcn_v2_5_is_idle(handle))
 return -EBUSY;
@@ -1065,6 +1300,9 @@ static int vcn_v2_5_set_powergating_state(void *handle,
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 int ret;

+if (amdgpu_sriov_vf(adev))
+return 0;

 if(state == adev->vcn.cur_state)
 return 0;

@@ -234,16 +234,9 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff);

 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
-ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
 ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
-if (adev->irq.ih.use_bus_addr) {
-ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
-} else {
-ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1);
-}
 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
 !!adev->irq.msi_enabled);

 if (amdgpu_sriov_vf(adev)) {
 if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
 DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
@@ -253,10 +246,19 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 }

-if ((adev->asic_type == CHIP_ARCTURUS
-&& adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
-|| adev->asic_type == CHIP_RENOIR)
+if ((adev->asic_type == CHIP_ARCTURUS &&
+adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+adev->asic_type == CHIP_RENOIR) {
+ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN);
+if (adev->irq.ih.use_bus_addr) {
+ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+MC_SPACE_GPA_ENABLE, 1);
+} else {
+ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+MC_SPACE_FBPA_ENABLE, 1);
+}
 WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
+}

 /* set the writeback address whether it's enabled or not */
 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,

@@ -42,6 +42,7 @@

 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
 static int kfd_mmap(struct file *, struct vm_area_struct *);

 static const char kfd_dev_name[] = "kfd";
@@ -51,6 +52,7 @@ static const struct file_operations kfd_fops = {
 .unlocked_ioctl = kfd_ioctl,
 .compat_ioctl = compat_ptr_ioctl,
 .open = kfd_open,
+.release = kfd_release,
 .mmap = kfd_mmap,
 };

@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
 if (IS_ERR(process))
 return PTR_ERR(process);

-if (kfd_is_locked())
+if (kfd_is_locked()) {
+kfd_unref_process(process);
 return -EAGAIN;
+}

+/* filep now owns the reference returned by kfd_create_process */
+filep->private_data = process;

 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
 process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
 return 0;
 }

+static int kfd_release(struct inode *inode, struct file *filep)
+{
+struct kfd_process *process = filep->private_data;
+
+if (process)
+kfd_unref_process(process);
+
+return 0;
+}

 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
 void *data)
 {
@@ -1801,9 +1818,14 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)

 dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);

-process = kfd_get_process(current);
-if (IS_ERR(process)) {
-dev_dbg(kfd_device, "no process\n");
+/* Get the process struct from the filep. Only the process
+ * that opened /dev/kfd can use the file descriptor. Child
+ * processes need to create their own KFD device context.
+ */
+process = filep->private_data;
+if (process->lead_thread != current->group_leader) {
+dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+retcode = -EBADF;
 goto err_i1;
 }

@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
 kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
 debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
 kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
-debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
 NULL, &kfd_debugfs_hang_hws_fops);
 }

@@ -728,6 +728,9 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 {
 if (!kfd->init_complete)
 return 0;

+kfd->dqm->ops.pre_reset(kfd->dqm);

 kgd2kfd_suspend(kfd);

 kfd_signal_reset_event(kfd);
@@ -822,6 +825,21 @@ dqm_start_error:
 return err;
 }

+static inline void kfd_queue_work(struct workqueue_struct *wq,
+struct work_struct *work)
+{
+int cpu, new_cpu;
+
+cpu = new_cpu = smp_processor_id();
+do {
+new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+if (cpu_to_node(new_cpu) == numa_node_id())
+break;
+} while (cpu != new_cpu);
+
+queue_work_on(new_cpu, wq, work);
+}

 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
@@ -844,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 patched_ihre, &is_patched)
 && enqueue_ih_ring_entry(kfd,
 is_patched ? patched_ihre : ih_ring_entry))
-queue_work(kfd->ih_wq, &kfd->interrupt_work);
+kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

 spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }

@@ -930,7 +930,6 @@ static void uninitialize(struct device_queue_manager *dqm)
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
 		kfree(dqm->mqd_mgrs[i]);
 	mutex_destroy(&dqm->lock_hidden);
-	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 }

 static int start_nocpsch(struct device_queue_manager *dqm)
@@ -947,12 +946,19 @@ static int start_nocpsch(struct device_queue_manager *dqm)
 static int stop_nocpsch(struct device_queue_manager *dqm)
 {
 	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
-		pm_uninit(&dqm->packets);
+		pm_uninit(&dqm->packets, false);
 	dqm->sched_running = false;

 	return 0;
 }

+static void pre_reset(struct device_queue_manager *dqm)
+{
+	dqm_lock(dqm);
+	dqm->is_resetting = true;
+	dqm_unlock(dqm);
+}
+
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
 				struct queue *q)
 {
@@ -1100,6 +1106,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	dqm_lock(dqm);
 	/* clear hang status when driver try to start the hw scheduler */
 	dqm->is_hws_hang = false;
+	dqm->is_resetting = false;
 	dqm->sched_running = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1107,20 +1114,24 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	return 0;
 fail_allocate_vidmem:
 fail_set_sched_resources:
-	pm_uninit(&dqm->packets);
+	pm_uninit(&dqm->packets, false);
 fail_packet_manager_init:
 	return retval;
 }

 static int stop_cpsch(struct device_queue_manager *dqm)
 {
+	bool hanging;
+
 	dqm_lock(dqm);
-	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+	if (!dqm->is_hws_hang)
+		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+	hanging = dqm->is_hws_hang || dqm->is_resetting;
 	dqm->sched_running = false;
 	dqm_unlock(dqm);

 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
-	pm_uninit(&dqm->packets);
+	pm_uninit(&dqm->packets, hanging);

 	return 0;
 }
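The pre_reset()/stop_cpsch() interplay above reduces to a small handshake: pre_reset() marks the queue manager as resetting, and stop later derives a "hanging" flag so teardown knows not to trust the hardware scheduler. A reduced sketch of that handshake (struct and printouts are simplified stand-ins, not the driver types):

#include <stdbool.h>
#include <stdio.h>

struct mgr {
	bool is_hws_hang;
	bool is_resetting;
};

/* Called before a GPU reset, as kgd2kfd_pre_reset() does via dqm->ops. */
static void pre_reset(struct mgr *m)
{
	m->is_resetting = true;
}

static int stop(struct mgr *m)
{
	bool hanging;

	if (!m->is_hws_hang)
		printf("unmap queues via HWS\n");	/* skipped when already hung */
	hanging = m->is_hws_hang || m->is_resetting;
	printf("uninit packet manager, hanging=%d\n", hanging);
	return 0;
}

int main(void)
{
	struct mgr m = { 0 };

	pre_reset(&m);
	stop(&m);	/* prints hanging=1: HIQ destroy can be skipped safely */
	return 0;
}
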
@@ -1352,8 +1363,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	/* should be timed out */
 	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				queue_preemption_timeout_ms);
-	if (retval)
+	if (retval) {
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+		dqm->is_hws_hang = true;
+		/* It's possible we're detecting a HWS hang in the
+		 * middle of a GPU reset. No need to schedule another
+		 * reset in this case.
+		 */
+		if (!dqm->is_resetting)
+			schedule_work(&dqm->hw_exception_work);
 		return retval;
+	}

 	pm_release_ib(&dqm->packets);
 	dqm->active_runlist = false;
@@ -1371,12 +1391,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
 	if (dqm->is_hws_hang)
 		return -EIO;
 	retval = unmap_queues_cpsch(dqm, filter, filter_param);
-	if (retval) {
-		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
-		dqm->is_hws_hang = true;
-		schedule_work(&dqm->hw_exception_work);
+	if (retval)
 		return retval;
-	}

 	return map_queues_cpsch(dqm);
 }
@@ -1770,6 +1786,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		dqm->ops.initialize = initialize_cpsch;
 		dqm->ops.start = start_cpsch;
 		dqm->ops.stop = stop_cpsch;
+		dqm->ops.pre_reset = pre_reset;
 		dqm->ops.destroy_queue = destroy_queue_cpsch;
 		dqm->ops.update_queue = update_queue;
 		dqm->ops.register_process = register_process;

@@ -1788,6 +1805,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		/* initialize dqm for no cp scheduling */
 		dqm->ops.start = start_nocpsch;
 		dqm->ops.stop = stop_nocpsch;
+		dqm->ops.pre_reset = pre_reset;
 		dqm->ops.create_queue = create_queue_nocpsch;
 		dqm->ops.destroy_queue = destroy_queue_nocpsch;
 		dqm->ops.update_queue = update_queue;
@@ -104,6 +104,7 @@ struct device_queue_manager_ops {
 	int	(*initialize)(struct device_queue_manager *dqm);
 	int	(*start)(struct device_queue_manager *dqm);
 	int	(*stop)(struct device_queue_manager *dqm);
+	void	(*pre_reset)(struct device_queue_manager *dqm);
 	void	(*uninitialize)(struct device_queue_manager *dqm);
 	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
 					struct kernel_queue *kq,

@@ -190,7 +191,6 @@ struct device_queue_manager {
 	/* the pasid mapping for each kfd vmid */
 	uint16_t		vmid_pasid[VMID_NUM];
 	uint64_t		pipelines_addr;
-	struct kfd_mem_obj	*pipeline_mem;
 	uint64_t		fence_gpu_addr;
 	unsigned int		*fence_addr;
 	struct kfd_mem_obj	*fence_mem;

@@ -199,6 +199,7 @@ struct device_queue_manager {

 	/* hw exception  */
 	bool			is_hws_hang;
+	bool			is_resetting;
 	struct work_struct	hw_exception_work;
 	struct kfd_mem_obj	hiq_sdma_mqd;
 	bool			sched_running;
@@ -195,9 +195,9 @@ err_get_kernel_doorbell:
 }

 /* Uninitialize a kernel queue and free all its memory usages. */
-static void kq_uninitialize(struct kernel_queue *kq)
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
 {
-	if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+	if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
 		kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
 					kq->queue->mqd,
 					KFD_PREEMPT_TYPE_WAVEFRONT_RESET,

@@ -337,9 +337,9 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	return NULL;
 }

-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
 {
-	kq_uninitialize(kq);
+	kq_uninitialize(kq, hanging);
 	kfree(kq);
 }

@@ -264,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	return 0;
 }

-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
 {
 	mutex_destroy(&pm->lock);
-	kernel_queue_uninit(pm->priv_queue);
+	kernel_queue_uninit(pm->priv_queue, hanging);
 }

 int pm_send_set_resources(struct packet_manager *pm,

@@ -883,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
 void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 					enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
 int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);

 /* Process Queue Manager */

@@ -972,7 +972,7 @@ extern const struct packet_manager_funcs kfd_vi_pm_funcs;
 extern const struct packet_manager_funcs kfd_v9_pm_funcs;

 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
 int pm_send_set_resources(struct packet_manager *pm,
 				struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
@@ -324,6 +324,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
 				(int)process->lead_thread->pid);
 	}
 out:
+	if (!IS_ERR(process))
+		kref_get(&process->ref);
 	mutex_unlock(&kfd_processes_mutex);

 	return process;

@@ -374,7 +374,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 		/* destroy kernel queue (DIQ) */
 		dqm = pqn->kq->dev->dqm;
 		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
-		kernel_queue_uninit(pqn->kq);
+		kernel_queue_uninit(pqn->kq, false);
 	}

 	if (pqn->q) {
@@ -486,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			dev->node_props.num_sdma_engines);
 	sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
 			dev->node_props.num_sdma_xgmi_engines);
+	sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+			dev->node_props.num_sdma_queues_per_engine);
+	sysfs_show_32bit_prop(buffer, "num_cp_queues",
+			dev->node_props.num_cp_queues);

 	if (dev->gpu) {
 		log_max_watch_addr =

@@ -1309,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
 	dev->node_props.num_sdma_xgmi_engines =
 				gpu->device_info->num_xgmi_sdma_engines;
+	dev->node_props.num_sdma_queues_per_engine =
+				gpu->device_info->num_sdma_queues_per_engine;
 	dev->node_props.num_gws = (hws_gws_support &&
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+	dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);

 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);

@@ -81,6 +81,8 @@ struct kfd_node_properties {
 	int32_t  drm_render_minor;
 	uint32_t num_sdma_engines;
 	uint32_t num_sdma_xgmi_engines;
+	uint32_t num_sdma_queues_per_engine;
+	uint32_t num_cp_queues;
 	char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
 };

@@ -6,7 +6,7 @@ config DRM_AMD_DC
 	bool "AMD DC - Enable new display engine"
 	default y
 	select SND_HDA_COMPONENT if SND_HDA_CORE
-	select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+	select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
 	help
 	  Choose this option if you want to use the new display engine
 	  support for AMDGPU. This adds required support for Vega and
@@ -98,6 +98,12 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
 /**
  * DOC: overview
  *
@@ -741,28 +747,27 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)

 static int dm_dmub_hw_init(struct amdgpu_device *adev)
 {
-	const unsigned int psp_header_bytes = 0x100;
-	const unsigned int psp_footer_bytes = 0x100;
 	const struct dmcub_firmware_header_v1_0 *hdr;
 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
 	struct abm *abm = adev->dm.dc->res_pool->abm;
-	struct dmub_srv_region_params region_params;
-	struct dmub_srv_region_info region_info;
-	struct dmub_srv_fb_params fb_params;
-	struct dmub_srv_fb_info fb_info;
 	struct dmub_srv_hw_params hw_params;
 	enum dmub_status status;
 	const unsigned char *fw_inst_const, *fw_bss_data;
-	uint32_t i;
-	int r;
+	uint32_t i, fw_inst_const_size, fw_bss_data_size;
 	bool has_hw_support;

 	if (!dmub_srv)
 		/* DMUB isn't supported on the ASIC. */
 		return 0;

+	if (!fb_info) {
+		DRM_ERROR("No framebuffer info for DMUB service.\n");
+		return -EINVAL;
+	}
+
 	if (!dmub_fw) {
 		/* Firmware required for DMUB support. */
 		DRM_ERROR("No firmware provided for DMUB.\n");
@@ -782,60 +787,36 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)

 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

-	/* Calculate the size of all the regions for the DMUB service. */
-	memset(&region_params, 0, sizeof(region_params));
-
-	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
-					psp_header_bytes - psp_footer_bytes;
-	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
-	region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size;
-
-	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
-					   &region_info);
-
-	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
-		return -EINVAL;
-	}
-
-	/*
-	 * Allocate a framebuffer based on the total size of all the regions.
-	 * TODO: Move this into GART.
-	 */
-	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
-				    &adev->dm.dmub_bo_gpu_addr,
-				    &adev->dm.dmub_bo_cpu_addr);
-	if (r)
-		return r;
-
-	/* Rebase the regions on the framebuffer address. */
-	memset(&fb_params, 0, sizeof(fb_params));
-	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
-	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
-	fb_params.region_info = &region_info;
-
-	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info);
-	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
-		return -EINVAL;
-	}
-
 	fw_inst_const = dmub_fw->data +
 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
-			psp_header_bytes;
+			PSP_HEADER_BYTES;

 	fw_bss_data = dmub_fw->data +
 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
 		      le32_to_cpu(hdr->inst_const_bytes);

 	/* Copy firmware and bios info into FB memory. */
-	memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
-	       region_params.inst_const_size);
-	memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
-	       region_params.bss_data_size);
-	memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr,
-	       adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size);
+	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+	memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+	       fw_inst_const_size);
+	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
+	       fw_bss_data_size);
+	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+	       adev->bios_size);
+
+	/* Reset regions that need to be reset. */
+	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

 	/* Initialize hardware. */
 	memset(&hw_params, 0, sizeof(hw_params));
@@ -845,8 +826,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 	if (dmcu)
 		hw_params.psp_version = dmcu->psp_version;

-	for (i = 0; i < fb_info.num_fb; ++i)
-		hw_params.fb[i] = &fb_info.fb[i];
+	for (i = 0; i < fb_info->num_fb; ++i)
+		hw_params.fb[i] = &fb_info->fb[i];

 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
 	if (status != DMUB_STATUS_OK) {
@@ -1174,6 +1155,11 @@ static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
 static int dm_dmub_sw_init(struct amdgpu_device *adev)
 {
 	struct dmub_srv_create_params create_params;
+	struct dmub_srv_region_params region_params;
+	struct dmub_srv_region_info region_info;
+	struct dmub_srv_fb_params fb_params;
+	struct dmub_srv_fb_info *fb_info;
+	struct dmub_srv *dmub_srv;
 	const struct dmcub_firmware_header_v1_0 *hdr;
 	const char *fw_name_dmub;
 	enum dmub_asic dmub_asic;
@@ -1191,24 +1177,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		return 0;
 	}

-	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
-	if (!adev->dm.dmub_srv) {
-		DRM_ERROR("Failed to allocate DMUB service!\n");
-		return -ENOMEM;
-	}
-
-	memset(&create_params, 0, sizeof(create_params));
-	create_params.user_ctx = adev;
-	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
-	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
-	create_params.asic = dmub_asic;
-
-	status = dmub_srv_create(adev->dm.dmub_srv, &create_params);
-	if (status != DMUB_STATUS_OK) {
-		DRM_ERROR("Error creating DMUB service: %d\n", status);
-		return -EINVAL;
-	}
-
 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
 	if (r) {
 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
@@ -1238,6 +1206,80 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
 		 adev->dm.dmcub_fw_version);

+	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+	dmub_srv = adev->dm.dmub_srv;
+
+	if (!dmub_srv) {
+		DRM_ERROR("Failed to allocate DMUB service!\n");
+		return -ENOMEM;
+	}
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.user_ctx = adev;
+	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+	create_params.asic = dmub_asic;
+
+	/* Create the DMUB service. */
+	status = dmub_srv_create(dmub_srv, &create_params);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error creating DMUB service: %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Calculate the size of all the regions for the DMUB service. */
+	memset(&region_params, 0, sizeof(region_params));
+
+	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+	region_params.vbios_size = adev->bios_size;
+	region_params.fw_bss_data =
+		adev->dm.dmub_fw->data +
+		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+		le32_to_cpu(hdr->inst_const_bytes);
+
+	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+					   &region_info);
+
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+		return -EINVAL;
+	}
+
+	/*
+	 * Allocate a framebuffer based on the total size of all the regions.
+	 * TODO: Move this into GART.
+	 */
+	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+				    &adev->dm.dmub_bo_gpu_addr,
+				    &adev->dm.dmub_bo_cpu_addr);
+	if (r)
+		return r;
+
+	/* Rebase the regions on the framebuffer address. */
+	memset(&fb_params, 0, sizeof(fb_params));
+	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
+	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
+	fb_params.region_info = &region_info;
+
+	adev->dm.dmub_fb_info =
+		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+	fb_info = adev->dm.dmub_fb_info;
+
+	if (!fb_info) {
+		DRM_ERROR(
+			"Failed to allocate framebuffer info for DMUB service!\n");
+		return -ENOMEM;
+	}
+
+	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+		return -EINVAL;
+	}
+
 	return 0;
 }

@@ -1257,6 +1299,9 @@ static int dm_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

+	kfree(adev->dm.dmub_fb_info);
+	adev->dm.dmub_fb_info = NULL;
+
 	if (adev->dm.dmub_srv) {
 		dmub_srv_destroy(adev->dm.dmub_srv);
 		adev->dm.dmub_srv = NULL;
@@ -1559,7 +1604,7 @@ static int dm_resume(void *handle)
 	struct dm_plane_state *dm_new_plane_state;
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 	enum dc_connection_type new_connection_type = dc_connection_none;
-	int i;
+	int i, r;

 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
 	dc_release_state(dm_state->context);

@@ -1567,6 +1612,11 @@ static int dm_resume(void *handle)
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 	dc_resource_state_construct(dm->dc, dm_state->context);

+	/* Before powering on DC we need to re-initialize DMUB. */
+	r = dm_dmub_hw_init(adev);
+	if (r)
+		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

@@ -3654,27 +3704,21 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
 	return color_space;
 }

-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
-	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-		return;
-
-	timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+static bool adjust_colour_depth_from_display_info(
+	struct dc_crtc_timing *timing_out,
 	const struct drm_display_info *info)
 {
+	enum dc_color_depth depth = timing_out->display_color_depth;
 	int normalized_clk;
-	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-		return;
 	do {
 		normalized_clk = timing_out->pix_clk_100hz / 10;
 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 			normalized_clk /= 2;
 		/* Adjusting pix clock following on HDMI spec based on colour depth */
-		switch (timing_out->display_color_depth) {
+		switch (depth) {
+		case COLOR_DEPTH_888:
+			break;
 		case COLOR_DEPTH_101010:
 			normalized_clk = (normalized_clk * 30) / 24;
 			break;
@@ -3685,14 +3729,15 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
 			normalized_clk = (normalized_clk * 48) / 24;
 			break;
 		default:
-			return;
+			/* The above depths are the only ones valid for HDMI. */
+			return false;
 		}
-		if (normalized_clk <= info->max_tmds_clock)
-			return;
-		reduce_mode_colour_depth(timing_out);
-	} while (timing_out->display_color_depth > COLOR_DEPTH_888);
+		if (normalized_clk <= info->max_tmds_clock) {
+			timing_out->display_color_depth = depth;
+			return true;
+		}
+	} while (--depth > COLOR_DEPTH_666);
+	return false;
 }

 static void fill_stream_properties_from_drm_display_mode(
@@ -3773,9 +3818,15 @@ static void fill_stream_properties_from_drm_display_mode(

 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-		adjust_colour_depth_from_display_info(timing_out, info);
+	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+		    drm_mode_is_420_also(info, mode_in) &&
+		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+			adjust_colour_depth_from_display_info(timing_out, info);
+		}
+	}
 }

 static void fill_audio_info(struct audio_info *audio_info,
 			    const struct drm_connector *drm_connector,
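For the colour-depth hunks above the check is plain arithmetic: the pixel clock is scaled by bpc/8 (and halved first for YCbCr 4:2:0) before being compared with the sink's max_tmds_clock, and the depth is stepped down until it fits; if nothing fits, the new code falls back to 4:2:0 when the mode allows it. A small numeric sketch of that check, with an example sink limit:

#include <stdbool.h>
#include <stdio.h>

/* Effective TMDS clock in kHz for a given bits-per-component,
 * mirroring the switch statement in the hunk above. */
static int effective_clk_khz(int pix_clk_khz, int bpc, bool is_ycbcr420)
{
	int clk = is_ycbcr420 ? pix_clk_khz / 2 : pix_clk_khz;

	switch (bpc) {
	case 8:  return clk;
	case 10: return clk * 30 / 24;
	case 12: return clk * 36 / 24;
	case 16: return clk * 48 / 24;
	default: return -1;	/* not a valid HDMI depth */
	}
}

int main(void)
{
	int pix_clk_khz = 594000;	/* 4K60 RGB */
	int max_tmds_khz = 600000;	/* example sink limit */

	/* 10 bpc needs 742500 kHz > 600000, so the loop steps down to 8 bpc. */
	printf("10 bpc: %d kHz\n", effective_clk_khz(pix_clk_khz, 10, false));
	printf("8 bpc:  %d kHz\n", effective_clk_khz(pix_clk_khz, 8, false));
	printf("fits at 8 bpc: %s\n",
	       effective_clk_khz(pix_clk_khz, 8, false) <= max_tmds_khz ? "yes" : "no");
	return 0;
}
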
@@ -4025,7 +4076,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
 				      &dsc_caps);
 #endif
@@ -5561,9 +5613,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,

 	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

-	/* This defaults to the max in the range, but we want 8bpc. */
-	aconnector->base.state->max_bpc = 8;
-	aconnector->base.state->max_requested_bpc = 8;
+	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
+	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
 	    dc_is_dmcu_initialized(adev->dm.dc)) {
@@ -132,6 +132,13 @@ struct amdgpu_display_manager {
 	 */
 	struct dmub_srv *dmub_srv;

+	/**
+	 * @dmub_fb_info:
+	 *
+	 * Framebuffer regions for the DMUB.
+	 */
+	struct dmub_srv_fb_info *dmub_fb_info;
+
 	/**
 	 * @dmub_fw:
 	 *
@@ -111,17 +111,12 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
  */
 static void dm_irq_work_func(struct work_struct *work)
 {
-	struct list_head *entry;
 	struct irq_list_head *irq_list_head =
 		container_of(work, struct irq_list_head, work);
 	struct list_head *handler_list = &irq_list_head->head;
 	struct amdgpu_dm_irq_handler_data *handler_data;

-	list_for_each(entry, handler_list) {
-		handler_data = list_entry(entry,
-					  struct amdgpu_dm_irq_handler_data,
-					  list);
-
+	list_for_each_entry(handler_data, handler_list, list) {
 		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
 			      handler_data->irq_source);

@@ -528,19 +523,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
 					 enum dc_irq_source irq_source)
 {
 	struct amdgpu_dm_irq_handler_data *handler_data;
-	struct list_head *entry;
 	unsigned long irq_table_flags;

 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

-	list_for_each(
-		entry,
-		&adev->dm.irq_handler_list_high_tab[irq_source]) {
-
-		handler_data = list_entry(entry,
-					  struct amdgpu_dm_irq_handler_data,
-					  list);
-
+	list_for_each_entry(handler_data,
+			    &adev->dm.irq_handler_list_high_tab[irq_source],
+			    list) {
 		/* Call a subcomponent which registered for immediate
 		 * interrupt notification */
 		handler_data->handler(handler_data->handler_arg);
@@ -1,5 +1,6 @@
 #
 # Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2019 Raptor Engineering, LLC
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),

@@ -24,7 +25,13 @@
 # It calculates Bandwidth and Watermarks values for HW programming
 #

+ifdef CONFIG_X86
 calcs_ccflags := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+calcs_ccflags := -mhard-float -maltivec
+endif

 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
@@ -32,6 +39,7 @@ IS_OLD_GCC = 1
 endif
 endif

+ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3

@@ -40,6 +48,7 @@ calcs_ccflags += -mpreferred-stack-boundary=4
 else
 calcs_ccflags += -msse2
 endif
+endif

 CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
@@ -154,14 +154,14 @@ static void calculate_bandwidth(



-	if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; }
-	else {
-		d0_underlay_enable = 1;
-	}
-	if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; }
-	else {
-		d1_underlay_enable = 1;
-	}
+	if (data->d0_underlay_mode == bw_def_none)
+		d0_underlay_enable = false;
+	else
+		d0_underlay_enable = true;
+	if (data->d1_underlay_mode == bw_def_none)
+		d1_underlay_enable = false;
+	else
+		d1_underlay_enable = true;
 	data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
 	switch (data->underlay_surface_type) {
 	case bw_def_420:

@@ -286,8 +286,8 @@ static void calculate_bandwidth(
 	data->cursor_width_pixels[2] = bw_int_to_fixed(0);
 	data->cursor_width_pixels[3] = bw_int_to_fixed(0);
 	/* graphics surface parameters from spreadsheet*/
-	fbc_enabled = 0;
-	lpt_enabled = 0;
+	fbc_enabled = false;
+	lpt_enabled = false;
 	for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
 		if (i < data->number_of_displays + 4) {
 			if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {

@@ -338,9 +338,9 @@ static void calculate_bandwidth(
 				data->access_one_channel_only[i] = 0;
 			}
 			if (data->fbc_en[i] == 1) {
-				fbc_enabled = 1;
+				fbc_enabled = true;
 				if (data->lpt_en[i] == 1) {
-					lpt_enabled = 1;
+					lpt_enabled = true;
 				}
 			}
 			data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
@@ -1,5 +1,6 @@
 /*
  * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -622,7 +623,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
 {
 	bool updated = false;

-	kernel_fpu_begin();
+	DC_FP_START();
 	if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
 			&& dc->debug.sr_exit_time_ns) {
 		updated = true;

@@ -658,7 +659,7 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
 		dc->dcn_soc->dram_clock_change_latency =
 				dc->debug.dram_clock_change_latency_ns / 1000.0;
 	}
-	kernel_fpu_end();
+	DC_FP_END();

 	return updated;
 }

@@ -738,7 +739,7 @@ bool dcn_validate_bandwidth(
 		dcn_bw_sync_calcs_and_dml(dc);

 	memset(v, 0, sizeof(*v));
-	kernel_fpu_begin();
+	DC_FP_START();

 	v->sr_exit_time = dc->dcn_soc->sr_exit_time;
 	v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;

@@ -1271,7 +1272,7 @@ bool dcn_validate_bandwidth(
 	bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
 	bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;

-	kernel_fpu_end();
+	DC_FP_END();

 	PERFORMANCE_TRACE_END();
 	BW_VAL_TRACE_FINISH();

@@ -1439,7 +1440,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	res = dm_pp_get_clock_levels_by_type_with_voltage(
 			ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);

-	kernel_fpu_begin();
+	DC_FP_START();

 	if (res)
 		res = verify_clock_values(&fclks);

@@ -1459,12 +1460,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	} else
 		BREAK_TO_DEBUGGER();

-	kernel_fpu_end();
+	DC_FP_END();

 	res = dm_pp_get_clock_levels_by_type_with_voltage(
 			ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);

-	kernel_fpu_begin();
+	DC_FP_START();

 	if (res)
 		res = verify_clock_values(&dcfclks);

@@ -1477,7 +1478,7 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 	} else
 		BREAK_TO_DEBUGGER();

-	kernel_fpu_end();
+	DC_FP_END();
 }

 void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)

@@ -1492,11 +1493,11 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 	if (!pp || !pp->set_wm_ranges)
 		return;

-	kernel_fpu_begin();
+	DC_FP_START();
 	min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
 	min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
 	socclk_khz = dc->dcn_soc->socclk * 1000;
-	kernel_fpu_end();
+	DC_FP_END();

 	/* Now notify PPLib/SMU about which Watermarks sets they should select
 	 * depending on DPM state they are in. And update BW MGR GFX Engine and

@@ -1547,7 +1548,7 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)

 void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 {
-	kernel_fpu_begin();
+	DC_FP_START();
 	DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
 			"sr_enter_plus_exit_time: %f ns\n"
 			"urgent_latency: %f ns\n"

@@ -1736,5 +1737,5 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
 	dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
 		dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
 	dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
-	kernel_fpu_end();
+	DC_FP_END();
 }
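The kernel_fpu_begin()/kernel_fpu_end() replacements above route the float-heavy bandwidth code through DC_FP_START()/DC_FP_END() so the same source can build on more than one architecture. The macros below are only a guess at the shape of such wrappers, not the actual dc header; on x86 they would map to the kernel FPU API, on PPC64 to the FP enable/disable helpers.

/* Illustrative only -- not the real os_types.h definitions. */
#if defined(CONFIG_X86)
#include <asm/fpu/api.h>
#define DC_FP_START()	kernel_fpu_begin()
#define DC_FP_END()	kernel_fpu_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#define DC_FP_START()	enable_kernel_fp()
#define DC_FP_END()	disable_kernel_fp()
#endif

/* Usage stays the same on every architecture:
 *	DC_FP_START();
 *	... floating-point watermark math ...
 *	DC_FP_END();
 */
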
@@ -27,6 +27,7 @@
 #include "clk_mgr_internal.h"

 #include "dce100/dce_clk_mgr.h"
+#include "dcn20_clk_mgr.h"
 #include "reg_helper.h"
 #include "core_types.h"
 #include "dm_helpers.h"

@@ -100,13 +101,13 @@ uint32_t dentist_get_did_from_divider(int divider)
 }

 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
-		struct dc_state *context)
+		struct dc_state *context, bool safe_to_lower)
 {
 	int i;

 	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
 	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
-		int dpp_inst, dppclk_khz;
+		int dpp_inst, dppclk_khz, prev_dppclk_khz;

 		/* Loop index will match dpp->inst if resource exists,
 		 * and we want to avoid dependency on dpp object
@@ -114,10 +115,14 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
 		dpp_inst = i;
 		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

+		prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+		if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
 			clk_mgr->dccg->funcs->update_dpp_dto(
 						clk_mgr->dccg, dpp_inst, dppclk_khz);
 		}
 	}
+}

 void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
 {
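The safe_to_lower parameter threaded through dcn20_update_clocks_update_dpp_dto() above implements a simple gate: a per-pipe DTO may always be raised immediately, but may only be lowered once the caller says the global DPPCLK has already been handled. A minimal sketch of that predicate (clock values are only examples):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether to reprogram a per-pipe divider, following the gate above:
 * always allowed when raising, only allowed to lower when safe_to_lower. */
static bool should_update_dto(int prev_khz, int new_khz, bool safe_to_lower)
{
	return safe_to_lower || prev_khz < new_khz;
}

int main(void)
{
	printf("%d\n", should_update_dto(600000, 400000, false)); /* 0: defer the drop */
	printf("%d\n", should_update_dto(600000, 400000, true));  /* 1: lower now */
	printf("%d\n", should_update_dto(400000, 600000, false)); /* 1: raise immediately */
	return 0;
}
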
@@ -161,6 +166,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 			dc->debug.force_clock_mode & 0x1) {
 		//this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3.
 		force_reset = true;
+
+		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
 		//force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level.
 	}
 	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

@@ -240,7 +248,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
 		if (dpp_clock_lowered) {
 			// if clock is being lowered, increase DTO before lowering refclk
-			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 			dcn20_update_clocks_update_dentist(clk_mgr);
 		} else {
 			// if clock is being raised, increase refclk before lowering DTO

@@ -248,7 +256,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 			dcn20_update_clocks_update_dentist(clk_mgr);
 			// always update dtos unless clock is lowered and not safe to lower
 			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-				dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 		}
 	}

@@ -339,6 +347,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 	}
 }

+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	uint32_t dispclk_wdivider;
+	uint32_t dppclk_wdivider;
+	int disp_divider;
+	int dpp_divider;
+
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
+	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
+
+	disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+	dpp_divider = dentist_get_divider_from_did(dispclk_wdivider);
+
+	if (disp_divider && dpp_divider) {
+		/* Calculate the current DFS clock, in kHz.*/
+		clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+			* clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
+
+		clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+			* clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
+	}
+
+}
+
 void dcn2_get_clock(struct clk_mgr *clk_mgr,
 			struct dc_state *context,
 			enum dc_clock_type clock_type,
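dcn2_read_clocks_from_hw_dentist() above recovers the current clocks as the DFS VCO frequency times a fixed scale factor divided by the DENTIST divider read back from hardware. A worked example with made-up values (the scale factor of 4 reflects divider steps of 0.25; the VCO frequency and divider are only illustrative):

#include <stdio.h>

#define DENTIST_DIVIDER_RANGE_SCALE_FACTOR 4	/* divider encoded in 0.25 steps */

/* khz = VCO * scale / divider, as in dcn2_read_clocks_from_hw_dentist(). */
static int dfs_clock_khz(int vco_freq_khz, int divider)
{
	if (!divider)
		return 0;
	return vco_freq_khz * DENTIST_DIVIDER_RANGE_SCALE_FACTOR / divider;
}

int main(void)
{
	int vco_khz = 3600000;	/* example 3.6 GHz DFS VCO */

	/* A raw divider of 12 (i.e. 3.0) gives 1.2 GHz. */
	printf("dispclk = %d kHz\n", dfs_clock_khz(vco_khz, 12));
	return 0;
}
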
@@ -34,7 +34,7 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
 			struct dc_state *context,
 			bool safe_to_lower);
 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
-		struct dc_state *context);
+		struct dc_state *context, bool safe_to_lower);

 void dcn2_init_clocks(struct clk_mgr *clk_mgr);

@@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
 			struct dc_clock_config *clock_cfg);

 void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
+
+void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
+
+
 #endif //__DCN20_CLK_MGR_H__
@ -164,16 +164,16 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dpp_clock_lowered) {
|
if (dpp_clock_lowered) {
|
||||||
// if clock is being lowered, increase DTO before lowering refclk
|
// increase per DPP DTO before lowering global dppclk
|
||||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
|
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||||
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
|
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
|
||||||
} else {
|
} else {
|
||||||
// if clock is being raised, increase refclk before lowering DTO
|
// increase global DPPCLK before lowering per DPP DTO
|
||||||
if (update_dppclk || update_dispclk)
|
if (update_dppclk || update_dispclk)
|
||||||
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
|
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
|
||||||
// always update dtos unless clock is lowered and not safe to lower
|
// always update dtos unless clock is lowered and not safe to lower
|
||||||
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
|
if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
|
||||||
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
|
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (update_dispclk &&
|
if (update_dispclk &&
|
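Editorial note: the reworded comments above encode an ordering rule: when the global DPPCLK is going down, the per-pipe DTOs are raised first so no pipe momentarily runs faster than the clock feeding it; when it is going up, the global clock is raised first. A minimal sketch of that rule with invented helper names, not driver API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the DTO and SMU programming calls. */
static void program_per_pipe_dto(int khz)   { printf("program per-pipe DTOs for %d kHz\n", khz); }
static void program_global_dppclk(int khz)  { printf("program global DPPCLK %d kHz\n", khz); }

/* Never let a pipe demand more than the global clock provides:
 * lowering -> shrink per-pipe demand first, then the global clock;
 * raising  -> grow the global clock first, then the per-pipe demand. */
static void update_dppclk(int cur_khz, int new_khz)
{
	bool lowering = new_khz < cur_khz;

	if (lowering) {
		program_per_pipe_dto(new_khz);
		program_global_dppclk(new_khz);
	} else {
		program_global_dppclk(new_khz);
		program_per_pipe_dto(new_khz);
	}
}

int main(void)
{
	update_dppclk(600000, 400000);	/* lowering */
	update_dppclk(400000, 600000);	/* raising */
	return 0;
}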
@@ -409,7 +409,7 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
 			continue;
 
 		ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
-		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
+		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
 		/* We will not select WM based on dcfclk, so leave it as unconstrained */
 		ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 		ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
@@ -66,6 +66,9 @@
 
 #include "dce/dce_i2c.h"
 
+#define CTX \
+	dc->ctx
+
 #define DC_LOGGER \
 	dc->ctx->logger
@@ -579,6 +582,40 @@ static void dc_destruct(struct dc *dc)
 
 }
 
+static bool dc_construct_ctx(struct dc *dc,
+		const struct dc_init_data *init_params)
+{
+	struct dc_context *dc_ctx;
+	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+	if (!dc_ctx)
+		return false;
+
+	dc_ctx->cgs_device = init_params->cgs_device;
+	dc_ctx->driver_context = init_params->driver;
+	dc_ctx->dc = dc;
+	dc_ctx->asic_id = init_params->asic_id;
+	dc_ctx->dc_sink_id_count = 0;
+	dc_ctx->dc_stream_id_count = 0;
+	dc_ctx->dce_environment = init_params->dce_environment;
+
+	/* Create logger */
+
+	dc_version = resource_parse_asic_id(init_params->asic_id);
+	dc_ctx->dce_version = dc_version;
+
+	dc_ctx->perf_trace = dc_perf_trace_create();
+	if (!dc_ctx->perf_trace) {
+		ASSERT_CRITICAL(false);
+		return false;
+	}
+
+	dc->ctx = dc_ctx;
+
+	return true;
+}
+
 static bool dc_construct(struct dc *dc,
 		const struct dc_init_data *init_params)
 {
@@ -590,7 +627,6 @@ static bool dc_construct(struct dc *dc,
 	struct dcn_ip_params *dcn_ip;
 #endif
 
-	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
 	dc->config = init_params->flags;
 
 	// Allocate memory for the vm_helper

@@ -636,26 +672,12 @@ static bool dc_construct(struct dc *dc,
 	dc->soc_bounding_box = init_params->soc_bounding_box;
 #endif
 
-	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
-	if (!dc_ctx) {
+	if (!dc_construct_ctx(dc, init_params)) {
 		dm_error("%s: failed to create ctx\n", __func__);
 		goto fail;
 	}
 
-	dc_ctx->cgs_device = init_params->cgs_device;
-	dc_ctx->driver_context = init_params->driver;
-	dc_ctx->dc = dc;
-	dc_ctx->asic_id = init_params->asic_id;
-	dc_ctx->dc_sink_id_count = 0;
-	dc_ctx->dc_stream_id_count = 0;
-	dc->ctx = dc_ctx;
-
-	/* Create logger */
-
-	dc_ctx->dce_environment = init_params->dce_environment;
-
-	dc_version = resource_parse_asic_id(init_params->asic_id);
-	dc_ctx->dce_version = dc_version;
+	dc_ctx = dc->ctx;
 
 	/* Resource should construct all asic specific resources.
 	 * This should be the only place where we need to parse the asic id

@@ -670,7 +692,7 @@ static bool dc_construct(struct dc *dc,
 		bp_init_data.bios = init_params->asic_id.atombios_base_address;
 
 		dc_ctx->dc_bios = dal_bios_parser_create(
-				&bp_init_data, dc_version);
+				&bp_init_data, dc_ctx->dce_version);
 
 		if (!dc_ctx->dc_bios) {
 			ASSERT_CRITICAL(false);

@@ -680,15 +702,11 @@ static bool dc_construct(struct dc *dc,
 		dc_ctx->created_bios = true;
 	}
 
-	dc_ctx->perf_trace = dc_perf_trace_create();
-	if (!dc_ctx->perf_trace) {
-		ASSERT_CRITICAL(false);
-		goto fail;
-	}
-
 	/* Create GPIO service */
 	dc_ctx->gpio_service = dal_gpio_service_create(
-			dc_version,
+			dc_ctx->dce_version,
 			dc_ctx->dce_environment,
 			dc_ctx);

@@ -697,7 +715,7 @@ static bool dc_construct(struct dc *dc,
 		goto fail;
 	}
 
-	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
+	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
 	if (!dc->res_pool)
 		goto fail;

@@ -728,8 +746,6 @@ static bool dc_construct(struct dc *dc,
 	return true;
 
 fail:
-
-	dc_destruct(dc);
 	return false;
 }
@@ -783,6 +799,33 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 	dc_release_state(current_ctx);
 }
 
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+	int i;
+	int count = 0;
+	struct pipe_ctx *pipe;
+	PERF_TRACE();
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe->plane_state)
+			continue;
+
+		/* Timeout 100 ms */
+		while (count < 100000) {
+			/* Must set to false to start with, due to OR in update function */
+			pipe->plane_state->status.is_flip_pending = false;
+			dc->hwss.update_pending_status(pipe);
+			if (!pipe->plane_state->status.is_flip_pending)
+				break;
+			udelay(1);
+			count++;
+		}
+		ASSERT(!pipe->plane_state->status.is_flip_pending);
+	}
+	PERF_TRACE();
+}
+
 /*******************************************************************************
  * Public functions
  ******************************************************************************/
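Editorial note: wait_for_no_pipes_pending() above is a bounded busy-wait: the flip-pending status is re-read up to 100000 times with a 1 us delay, giving an overall ceiling of roughly 100 ms. A standalone userspace model of the same pattern follows; the pipe array and status callback are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_pipe {
	bool flip_pending;
	int reads_until_done;	/* pretend hardware: flip completes after N polls */
};

/* Stand-in for the driver's update_pending_status() hook. */
static void update_pending_status(struct fake_pipe *p)
{
	p->flip_pending = p->reads_until_done-- > 0;
}

static void wait_for_no_flips(struct fake_pipe *pipes, int n)
{
	int count = 0;	/* shared budget across pipes, like the driver loop */

	for (int i = 0; i < n; i++) {
		/* roughly 100 ms ceiling: up to 100000 polls x 1 us */
		while (count < 100000) {
			update_pending_status(&pipes[i]);
			if (!pipes[i].flip_pending)
				break;
			usleep(1);
			count++;
		}
		if (pipes[i].flip_pending)
			printf("pipe %d: timed out with a flip still pending\n", i);
	}
}

int main(void)
{
	struct fake_pipe pipes[2] = { { true, 3 }, { true, 10 } };

	wait_for_no_flips(pipes, 2);
	printf("all flips drained\n");
	return 0;
}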
@@ -795,8 +838,16 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 	if (NULL == dc)
 		goto alloc_fail;
 
-	if (false == dc_construct(dc, init_params))
-		goto construct_fail;
+	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+		if (false == dc_construct_ctx(dc, init_params)) {
+			dc_destruct(dc);
+			goto construct_fail;
+		}
+	} else {
+		if (false == dc_construct(dc, init_params)) {
+			dc_destruct(dc);
+			goto construct_fail;
+		}
 
 		full_pipe_count = dc->res_pool->pipe_count;
 		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)

@@ -805,17 +856,19 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 				full_pipe_count,
 				dc->res_pool->stream_enc_count);
 
+		dc->optimize_seamless_boot_streams = 0;
 		dc->caps.max_links = dc->link_count;
 		dc->caps.max_audios = dc->res_pool->audio_count;
 		dc->caps.linear_pitch_alignment = 64;
 
 		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
-	/* Populate versioning information */
-	dc->versions.dc_ver = DC_VER;
-
 		if (dc->res_pool->dmcu != NULL)
 			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+	}
+
+	/* Populate versioning information */
+	dc->versions.dc_ver = DC_VER;
 
 	dc->build_id = DC_BUILD_ID;

@@ -834,6 +887,7 @@ alloc_fail:
 
 void dc_hardware_init(struct dc *dc)
 {
+	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
 		dc->hwss.init_hw(dc);
 }
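Editorial note: the dc_create() change above adds a second construction path: for DCE_ENV_VIRTUAL_HW only the context is built, and dc_hardware_init() later skips hardware programming for that environment. The shape of that decision can be sketched as follows; the types and helpers are invented placeholders, not the DC API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum env { ENV_HW, ENV_VIRTUAL_HW };

struct ctx_only   { enum env env; };
struct full_state { struct ctx_only ctx; /* plus resource pools, links, ... */ };

static bool construct_ctx(struct ctx_only *c, enum env e)    { c->env = e; return true; }
static bool construct_full(struct full_state *s, enum env e) { return construct_ctx(&s->ctx, e); }

static struct full_state *create(enum env e)
{
	struct full_state *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	/* Virtual targets only need the context; real hardware needs everything. */
	if (e == ENV_VIRTUAL_HW ? !construct_ctx(&s->ctx, e) : !construct_full(s, e)) {
		free(s);
		return NULL;
	}
	return s;
}

static void hardware_init(struct full_state *s)
{
	if (s->ctx.env != ENV_VIRTUAL_HW)
		printf("programming real hardware\n");
}

int main(void)
{
	struct full_state *v = create(ENV_VIRTUAL_HW);
	struct full_state *h = create(ENV_HW);

	hardware_init(v);	/* prints nothing: virtual environment */
	hardware_init(h);
	free(v);
	free(h);
	return 0;
}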
@@ -1148,10 +1202,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	for (i = 0; i < context->stream_count; i++) {
 		if (context->streams[i]->apply_seamless_boot_optimization)
-			dc->optimize_seamless_boot = true;
+			dc->optimize_seamless_boot_streams++;
 	}
 
-	if (!dc->optimize_seamless_boot)
+	if (dc->optimize_seamless_boot_streams == 0)
 		dc->hwss.prepare_bandwidth(dc, context);
 
 	/* re-program planes for existing stream, in case we need to

@@ -1224,9 +1278,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-	if (!dc->optimize_seamless_boot)
+	if (dc->optimize_seamless_boot_streams == 0) {
+		/* Must wait for no flips to be pending before doing optimize bw */
+		wait_for_no_pipes_pending(dc, context);
 		/* pplib is notified if disp_num changed */
 		dc->hwss.optimize_bandwidth(dc, context);
+	}
 
 	for (i = 0; i < context->stream_count; i++)
 		context->streams[i]->mode_changed = false;

@@ -1267,7 +1324,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 	int i;
 	struct dc_state *context = dc->current_state;
 
-	if (!dc->optimized_required || dc->optimize_seamless_boot)
+	if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
 		return true;
 
 	post_surface_trace(dc);

@@ -1543,7 +1600,7 @@ static enum surface_update_type get_scaling_info_update_type(
 
 		update_flags->bits.scaling_change = 1;
 		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
-				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
+				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
 			/* Making src rect bigger requires a bandwidth change */
 			update_flags->bits.clock_change = 1;
 	}

@@ -1557,11 +1614,11 @@ static enum surface_update_type get_scaling_info_update_type(
 		update_flags->bits.position_change = 1;
 
 	if (update_flags->bits.clock_change
-			|| update_flags->bits.bandwidth_change)
+			|| update_flags->bits.bandwidth_change
+			|| update_flags->bits.scaling_change)
 		return UPDATE_TYPE_FULL;
 
-	if (update_flags->bits.scaling_change
-			|| update_flags->bits.position_change)
+	if (update_flags->bits.position_change)
 		return UPDATE_TYPE_MED;
 
 	return UPDATE_TYPE_FAST;

@@ -2051,7 +2108,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
 				dc->hwss.optimize_bandwidth(dc, dc->current_state);
 			} else {
-				if (!dc->optimize_seamless_boot)
+				if (dc->optimize_seamless_boot_streams == 0)
 					dc->hwss.prepare_bandwidth(dc, dc->current_state);
 
 				core_link_enable_stream(dc->current_state, pipe_ctx);

@@ -2092,7 +2149,7 @@ static void commit_planes_for_stream(struct dc *dc,
 	int i, j;
 	struct pipe_ctx *top_pipe_to_program = NULL;
 
-	if (dc->optimize_seamless_boot && surface_count > 0) {
+	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
 		/* Optimize seamless boot flag keeps clocks and watermarks high until
 		 * first flip. After first flip, optimization is required to lower
 		 * bandwidth. Important to note that it is expected UEFI will
@@ -2101,12 +2158,14 @@ static void commit_planes_for_stream(struct dc *dc,
 		 */
 		if (stream->apply_seamless_boot_optimization) {
 			stream->apply_seamless_boot_optimization = false;
-			dc->optimize_seamless_boot = false;
+			dc->optimize_seamless_boot_streams--;
+
+			if (dc->optimize_seamless_boot_streams == 0)
 				dc->optimized_required = true;
 		}
 	}
 
-	if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+	if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
 		dc->hwss.prepare_bandwidth(dc, context);
 		context_clock_trace(dc, context);
 	}
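Editorial note: the optimize_seamless_boot bool becomes optimize_seamless_boot_streams, a counter. Clocks and watermarks stay at firmware boot levels until every stream lit by the firmware has taken its first flip, and only when the counter reaches zero is bandwidth optimization re-armed. A toy model of that counting logic, not DC code:

#include <stdbool.h>
#include <stdio.h>

static int seamless_boot_streams;
static bool optimized_required;

/* Called once per stream that was already enabled by firmware at boot. */
static void adopt_firmware_stream(void) { seamless_boot_streams++; }

/* Called when such a stream performs its first flip after driver takeover. */
static void first_flip(void)
{
	seamless_boot_streams--;
	if (seamless_boot_streams == 0)
		optimized_required = true;	/* safe to lower clocks/watermarks now */
}

int main(void)
{
	adopt_firmware_stream();
	adopt_firmware_stream();
	first_flip();
	printf("optimize yet? %s\n", optimized_required ? "yes" : "no");	/* no */
	first_flip();
	printf("optimize yet? %s\n", optimized_required ? "yes" : "no");	/* yes */
	return 0;
}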
@@ -590,7 +590,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
 		struct aux_payload *payload)
 {
 	uint32_t retrieved = 0;
-	bool ret = 0;
+	bool ret = false;
 
 	if (!ddc)
 		return false;

@@ -2854,8 +2854,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 	/* For now we only handle 'Downstream port status' case.
 	 * If we got sink count changed it means
 	 * Downstream port status changed,
-	 * then DM should call DC to do the detection. */
-	if (hpd_rx_irq_check_link_loss_status(
+	 * then DM should call DC to do the detection.
+	 * NOTE: Do not handle link loss on eDP since it is internal link*/
+	if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+			hpd_rx_irq_check_link_loss_status(
 			link,
 			&hpd_irq_dpcd_data)) {
 		/* Connectivity log: link loss */

@@ -173,15 +173,20 @@ bool edp_receiver_ready_T9(struct dc_link *link)
 }
 bool edp_receiver_ready_T7(struct dc_link *link)
 {
-	unsigned int tries = 0;
 	unsigned char sinkstatus = 0;
 	unsigned char edpRev = 0;
 	enum dc_status result = DC_OK;
 
+	/* use absolute time stamp to constrain max T7*/
+	unsigned long long enter_timestamp = 0;
+	unsigned long long finish_timestamp = 0;
+	unsigned long long time_taken_in_ns = 0;
+
 	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
 	if (result == DC_OK && edpRev < DP_EDP_12)
 		return true;
 	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+	enter_timestamp = dm_get_timestamp(link->ctx);
 	do {
 		sinkstatus = 0;
 		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
@@ -189,8 +194,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
 			break;
 		if (result != DC_OK)
 			break;
-		udelay(25); //MAx T7 is 50ms
-	} while (++tries < 300);
+		udelay(25);
+		finish_timestamp = dm_get_timestamp(link->ctx);
+		time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+	} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
 
 	if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
 		udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
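Editorial note: the T7 change above replaces a fixed iteration count (300 polls of 25 us) with an absolute-timestamp bound, so the loop really stops at the 50 ms T7 limit even if each DPCD read takes longer than the nominal delay. A userspace sketch of the same pattern using clock_gettime; the sink-status read is faked.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Pretend DPCD read: reports "ready" once ~10 ms have elapsed. */
static bool sink_ready(long long start_ns)
{
	return now_ns() - start_ns > 10 * 1000000LL;
}

int main(void)
{
	long long start = now_ns();
	long long elapsed;

	do {
		if (sink_ready(start)) {
			printf("sink ready after %lld us\n", (now_ns() - start) / 1000);
			return 0;
		}
		usleep(25);
		elapsed = now_ns() - start;
	} while (elapsed < 50 * 1000000LL);	/* max T7 is 50 ms */

	printf("gave up at the 50 ms T7 bound\n");
	return 0;
}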
@@ -940,30 +940,43 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 
 }
 
-static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+/*
+ * When handling 270 rotation in mixed SLS mode, we have
+ * stream->timing.h_border_left that is non zero. If we are doing
+ * pipe-splitting, this h_border_left value gets added to recout.x and when it
+ * calls calculate_inits_and_adj_vp() and
+ * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
+ * pipe to be incorrect.
+ *
+ * To fix this, instead of using stream->timing.h_border_left, we can use
+ * stream->dst.x to represent the border instead. So we will set h_border_left
+ * to 0 and shift the appropriate amount in stream->dst.x. We will then
+ * perform all calculations in resource_build_scaling_params() based on this
+ * and then restore the h_border_left and stream->dst.x to their original
+ * values.
+ *
+ * shift_border_left_to_dst() will shift the amount of h_border_left to
+ * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst()
+ * will restore h_border_left and stream->dst.x back to their original values
+ * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
+ * original h_border_left value in its calculation.
+ */
+int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
 {
-	unsigned int integer_multiple = 1;
+	int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
 
-	if (pipe_ctx->plane_state->scaling_quality.integer_scaling) {
-		// calculate maximum # of replication of src onto addressable
-		integer_multiple = min(
-				pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
-				pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
-
-		//scale dst
-		pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
-		pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
-
-		//center dst onto addressable
-		pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
-		pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
-
-		//We are guaranteed that we are scaling in integer ratio
-		pipe_ctx->plane_state->scaling_quality.v_taps = 1;
-		pipe_ctx->plane_state->scaling_quality.h_taps = 1;
-		pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
-		pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+	if (store_h_border_left) {
+		pipe_ctx->stream->timing.h_border_left = 0;
+		pipe_ctx->stream->dst.x += store_h_border_left;
 	}
+	return store_h_border_left;
+}
+
+void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
+		int store_h_border_left)
+{
+	pipe_ctx->stream->dst.x -= store_h_border_left;
+	pipe_ctx->stream->timing.h_border_left = store_h_border_left;
 }
 
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)

@@ -971,6 +984,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	bool res = false;
+	int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 	/* Important: scaling ratio calculation requires pixel format,
 	 * lb depth calculation requires recout and taps require scaling ratios.

@@ -979,14 +993,18 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
 			pipe_ctx->plane_state->format);
 
-	calculate_integer_scaling(pipe_ctx);
-
 	calculate_scaling_ratios(pipe_ctx);
 
 	calculate_viewport(pipe_ctx);
 
-	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+	if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
+		pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+		if (store_h_border_left) {
+			restore_border_left_from_dst(pipe_ctx,
+				store_h_border_left);
+		}
 		return false;
+	}
 
 	calculate_recout(pipe_ctx);

@@ -999,8 +1017,10 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
 	pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
 
-	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
-	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+	pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
+		store_h_border_left + timing->h_border_right;
+	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
+		timing->v_border_top + timing->v_border_bottom;
 
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)

@@ -1047,6 +1067,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 			plane_state->dst_rect.x,
 			plane_state->dst_rect.y);
 
+	if (store_h_border_left)
+		restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+
 	return res;
 }
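Editorial note: the new shift_border_left_to_dst()/restore_border_left_from_dst() pair is a save-shift-restore pattern: the left border is folded into stream->dst.x for the duration of the scaling-parameter math and put back on every exit path, so callers never observe a modified timing. A self-contained illustration of the idea with invented toy types, not the DC structures:

#include <stdio.h>

struct toy_stream {
	int h_border_left;
	int dst_x;
};

/* Fold the border into dst_x and report how much was moved. */
static int shift_border_left_to_dst(struct toy_stream *s)
{
	int stored = s->h_border_left;

	if (stored) {
		s->h_border_left = 0;
		s->dst_x += stored;
	}
	return stored;
}

static void restore_border_left_from_dst(struct toy_stream *s, int stored)
{
	s->dst_x -= stored;
	s->h_border_left = stored;
}

int main(void)
{
	struct toy_stream s = { .h_border_left = 16, .dst_x = 100 };
	int stored = shift_border_left_to_dst(&s);

	/* ... all scaling math runs against dst_x = 116, border = 0 ... */
	printf("during: border=%d dst_x=%d\n", s.h_border_left, s.dst_x);

	restore_border_left_from_dst(&s, stored);
	printf("after : border=%d dst_x=%d\n", s.h_border_left, s.dst_x);	/* 16, 100 */
	return 0;
}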
@@ -1894,8 +1917,26 @@ static int acquire_resource_from_hw_enabled_state(
 	pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
 	pipe_ctx->stream_res.opp = pool->opps[tg_inst];
 
-	if (pool->dpps[tg_inst])
+	if (pool->dpps[tg_inst]) {
 		pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
+
+		// Read DPP->MPCC->OPP Pipe from HW State
+		if (pool->mpc->funcs->read_mpcc_state) {
+			struct mpcc_state s = {0};
+
+			pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+
+			if (s.dpp_id < MAX_MPCC)
+				pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
+
+			if (s.bot_mpcc_id < MAX_MPCC)
+				pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+						&pool->mpc->mpcc_array[s.bot_mpcc_id];
+
+			if (s.opp_id < MAX_OPP)
+				pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+		}
+	}
 	pipe_ctx->pipe_idx = tg_inst;
 
 	pipe_ctx->stream = stream;

@@ -2281,7 +2322,7 @@ static void set_avi_info_frame(
 	if (color_space == COLOR_SPACE_SRGB ||
 		color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
 		hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
-		hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+		hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
 	} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
 		color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
 		hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
@@ -2811,3 +2852,51 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
 		return -1;
 	}
 }
+static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
+{
+	if (modes) {
+		if (modes->sample_rates.rate.RATE_192)
+			return 192000;
+		if (modes->sample_rates.rate.RATE_176_4)
+			return 176400;
+		if (modes->sample_rates.rate.RATE_96)
+			return 96000;
+		if (modes->sample_rates.rate.RATE_88_2)
+			return 88200;
+		if (modes->sample_rates.rate.RATE_48)
+			return 48000;
+		if (modes->sample_rates.rate.RATE_44_1)
+			return 44100;
+		if (modes->sample_rates.rate.RATE_32)
+			return 32000;
+	}
+	/*original logic when no audio info*/
+	return 441000;
+}
+
+void get_audio_check(struct audio_info *aud_modes,
+		struct audio_check *audio_chk)
+{
+	unsigned int i;
+	unsigned int max_sample_rate = 0;
+
+	if (aud_modes) {
+		audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/
+
+		audio_chk->max_audiosample_rate = 0;
+		for (i = 0; i < aud_modes->mode_count; i++) {
+			max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
+			if (audio_chk->max_audiosample_rate < max_sample_rate)
+				audio_chk->max_audiosample_rate = max_sample_rate;
+			/*dts takes the same as type 2: AP = 0.25*/
+		}
+		/*check which one take more bandwidth*/
+		if (audio_chk->max_audiosample_rate > 192000)
+			audio_chk->audio_packet_type = 0x9;/*AP =1*/
+		audio_chk->acat = 0;/*not support*/
+	}
+}
+
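Editorial note: get_audio_check() above picks the most demanding advertised mode: the highest sample rate any audio mode supports decides whether the stream needs the larger audio packet type. The selection itself is a highest-set-capability walk, which can be modeled as below; the flag names are invented for the sketch.

#include <stdio.h>

/* Invented capability bits, highest rate first. */
enum {
	CAP_192K  = 1 << 6,
	CAP_176K4 = 1 << 5,
	CAP_96K   = 1 << 4,
	CAP_88K2  = 1 << 3,
	CAP_48K   = 1 << 2,
	CAP_44K1  = 1 << 1,
	CAP_32K   = 1 << 0,
};

static unsigned int max_rate_hz(unsigned int caps)
{
	if (caps & CAP_192K)  return 192000;
	if (caps & CAP_176K4) return 176400;
	if (caps & CAP_96K)   return 96000;
	if (caps & CAP_88K2)  return 88200;
	if (caps & CAP_48K)   return 48000;
	if (caps & CAP_44K1)  return 44100;
	if (caps & CAP_32K)   return 32000;
	return 44100;	/* sketch's fallback when no capability info is present */
}

int main(void)
{
	unsigned int modes[] = { CAP_48K | CAP_44K1, CAP_96K | CAP_48K };
	unsigned int best = 0;

	for (unsigned int i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		unsigned int r = max_rate_hz(modes[i]);

		if (r > best)
			best = r;
	}
	printf("max sample rate across modes: %u Hz\n", best);	/* 96000 */
	return 0;
}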
@@ -406,6 +406,12 @@ bool dc_stream_add_writeback(struct dc *dc,
 		stream->writeback_info[stream->num_wb_info++] = *wb_info;
 	}
 
+	if (dc->hwss.enable_writeback) {
+		struct dc_stream_status *stream_status = dc_stream_get_status(stream);
+		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+		dwb->otg_inst = stream_status->primary_otg_inst;
+	}
+	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
 	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
 		dm_error("DC: update_bandwidth failed!\n");
 		return false;

@@ -413,18 +419,17 @@ bool dc_stream_add_writeback(struct dc *dc,
 
 	/* enable writeback */
 	if (dc->hwss.enable_writeback) {
-		struct dc_stream_status *stream_status = dc_stream_get_status(stream);
 		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
 
 		if (dwb->funcs->is_enabled(dwb)) {
 			/* writeback pipe already enabled, only need to update */
-			dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
+			dc->hwss.update_writeback(dc, wb_info, dc->current_state);
 		} else {
 			/* Enable writeback pipe from scratch*/
-			dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+			dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+		}
 		}
 	}
 
 	return true;
 }

@@ -463,6 +468,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
 	}
 	stream->num_wb_info = j;
 
+	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
 	/* recalculate and apply DML parameters */
 	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
 		dm_error("DC: update_bandwidth failed!\n");

@@ -472,10 +478,19 @@ bool dc_stream_remove_writeback(struct dc *dc,
 	/* disable writeback */
 	if (dc->hwss.disable_writeback)
 		dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+	}
 	return true;
 }
 
+bool dc_stream_warmup_writeback(struct dc *dc,
+		int num_dwb,
+		struct dc_writeback_info *wb_info)
+{
+	if (dc->hwss.mmhubbub_warmup)
+		return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
+	else
+		return false;
+}
 uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
 {
 	uint8_t i;
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.62"
+#define DC_VER "3.2.64"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6

@@ -367,6 +367,7 @@ struct dc_debug_options {
 	bool disable_hubp_power_gate;
 	bool disable_dsc_power_gate;
 	int dsc_min_slice_height_override;
+	int dsc_bpp_increment_div;
 	bool native422_support;
 	bool disable_pplib_wm_range;
 	enum wm_report_mode pplib_wm_report_mode;

@@ -513,7 +514,7 @@ struct dc {
 	bool optimized_required;
 
 	/* Require to maintain clocks and bandwidth for UEFI enabled HW */
-	bool optimize_seamless_boot;
+	int optimize_seamless_boot_streams;
 
 	/* FBC compressor */
 	struct compressor *fbc_compressor;

@@ -53,7 +53,8 @@ struct dc_dsc_policy {
 	uint32_t min_target_bpp;
 };
 
-bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
+bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
+		const uint8_t *dpcd_dsc_basic_data,
 		const uint8_t *dpcd_dsc_ext_data,
 		struct dsc_dec_dpcd_caps *dsc_sink_caps);

@@ -77,4 +78,6 @@ bool dc_dsc_compute_config(
 void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
 		struct dc_dsc_policy *policy);
 
+void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+
 #endif

@@ -133,6 +133,7 @@ struct dc_link {
 	struct link_flags {
 		bool dp_keep_receiver_powered;
 		bool dp_skip_DID2;
+		bool dp_skip_reset_segment;
 	} wa_flags;
 	struct link_mst_stream_allocation_table mst_stream_alloc_table;

@@ -344,10 +344,17 @@ bool dc_add_all_planes_for_stream(
 bool dc_stream_add_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_writeback_info *wb_info);
 
 bool dc_stream_remove_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		uint32_t dwb_pipe_inst);
 
+bool dc_stream_warmup_writeback(struct dc *dc,
+		int num_dwb,
+		struct dc_writeback_info *wb_info);
+
 bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
 
 bool dc_stream_set_dynamic_metadata(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_dmdata_attributes *dmdata_attr);