Merge branch 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is the main feature pull for radeon and amdgpu for 4.11. Highlights:

- Power and clockgating improvements
- Preliminary SR-IOV support
- ttm buffer priority support
- ttm eviction fixes
- Removal of the ttm lru callbacks
- Remove SI DPM quirks due to MC firmware issues
- Handle VFCT with multiple vbioses
- Powerplay improvements
- Lots of driver cleanups

* 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux: (120 commits)
  drm/amdgpu: fix amdgpu_bo_va_mapping flags
  drm/amdgpu: access stolen VRAM directly on CZ (v2)
  drm/amdgpu: access stolen VRAM directly on KV/KB (v2)
  drm/amdgpu: fix kernel panic when dpm disabled on Kv.
  drm/amdgpu: fix dpm bug on Kv.
  drm/amd/powerplay: fix regresstion issue can't set manual dpm mode.
  drm/amdgpu: handle vfct with multiple vbios images
  drm/radeon: handle vfct with multiple vbios images
  drm/amdgpu: move misc si headers into amdgpu
  drm/amdgpu: remove unused header si_reg.h
  drm/radeon: drop pitcairn dpm quirks
  drm/amdgpu: drop pitcairn dpm quirks
  drm: radeon: radeon_ttm: Handle return NULL error from ioremap_nocache
  drm/amd/amdgpu/amdgpu_ttm: Handle return NULL error from ioremap_nocache
  drm/amdgpu: add new virtual display ID
  drm/amd/amdgpu: remove the uncessary parameter for ib scheduler
  drm/amdgpu: Bring bo creation in line with radeon driver (v2)
  drm/amd/powerplay: fix misspelling in header guard
  drm/ttm: revert "add optional LRU removal callback v2"
  drm/ttm: revert "implement LRU add callbacks v2"
  ...
commit 29a73d906b
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -34,7 +34,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-	vi.o
+	vi.o mxgpu_vi.o
 
 # add GMC block
 amdgpu-y += \
@@ -52,8 +52,7 @@ amdgpu-y += \
 # add SMC block
 amdgpu-y += \
 	amdgpu_dpm.o \
-	amdgpu_powerplay.o \
-	cz_smc.o cz_dpm.o
+	amdgpu_powerplay.o
 
 # add DCE block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -91,7 +91,6 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_powerplay;
 extern int amdgpu_no_evict;
 extern int amdgpu_direct_gma_size;
 extern unsigned amdgpu_pcie_gen_cap;
@@ -184,12 +183,18 @@ enum amdgpu_thermal_irq {
 	AMDGPU_THERMAL_IRQ_LAST
 };
 
+enum amdgpu_kiq_irq {
+	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
+	AMDGPU_CP_KIQ_IRQ_LAST
+};
+
 int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 				 enum amd_ip_block_type block_type,
 				 enum amd_clockgating_state state);
 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 				 enum amd_ip_block_type block_type,
 				 enum amd_powergating_state state);
+void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
 int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 			 enum amd_ip_block_type block_type);
 bool amdgpu_is_idle(struct amdgpu_device *adev,
@@ -352,7 +357,7 @@ struct amdgpu_bo_va_mapping {
 	struct list_head		list;
 	struct interval_tree_node	it;
 	uint64_t			offset;
-	uint32_t			flags;
+	uint64_t			flags;
 };
 
 /* bo virtual addresses in a specific vm */
@@ -776,14 +781,20 @@ struct amdgpu_mec {
 	u32 num_queue;
 };
 
+struct amdgpu_kiq {
+	u64			eop_gpu_addr;
+	struct amdgpu_bo	*eop_obj;
+	struct amdgpu_ring	ring;
+	struct amdgpu_irq_src	irq;
+};
+
 /*
  * GPU scratch registers structures, functions & helpers
  */
 struct amdgpu_scratch {
 	unsigned		num_reg;
 	uint32_t                reg_base;
-	bool			free[32];
-	uint32_t		reg[32];
+	uint32_t		free_mask;
 };
 
 /*
@@ -851,6 +862,7 @@ struct amdgpu_gfx {
 	struct amdgpu_gca_config	config;
 	struct amdgpu_rlc		rlc;
 	struct amdgpu_mec		mec;
+	struct amdgpu_kiq		kiq;
 	struct amdgpu_scratch		scratch;
 	const struct firmware		*me_fw;	/* ME firmware */
 	uint32_t			me_fw_version;
@@ -894,8 +906,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
 		    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, struct dma_fence *last_vm_update,
-		       struct amdgpu_job *job, struct dma_fence **f);
+		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
+		       struct dma_fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -938,6 +950,7 @@ struct amdgpu_cs_parser {
 #define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
 #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
 #define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
+#define AMDGPU_VM_DOMAIN                    (1 << 3) /* bit set means in virtual memory context */
 
 struct amdgpu_job {
 	struct amd_sched_job    base;
@@ -1024,6 +1037,7 @@ struct amdgpu_uvd {
 	bool			use_ctx_buf;
 	struct amd_sched_entity entity;
 	uint32_t                srbm_soft_reset;
+	bool			is_powergated;
 };
 
 /*
@@ -1052,6 +1066,7 @@ struct amdgpu_vce {
 	struct amd_sched_entity	entity;
 	uint32_t                srbm_soft_reset;
 	unsigned		num_rings;
+	bool			is_powergated;
 };
 
 /*
@@ -1177,7 +1192,6 @@ struct amdgpu_asic_funcs {
 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
 				   u8 *bios, u32 length_bytes);
-	void (*detect_hw_virtualization) (struct amdgpu_device *adev);
 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1332,7 +1346,6 @@ struct amdgpu_device {
 	/* BIOS */
 	uint8_t				*bios;
 	uint32_t			bios_size;
-	bool				is_atom_bios;
 	struct amdgpu_bo		*stollen_vga_memory;
 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
 
@@ -1462,7 +1475,7 @@ struct amdgpu_device {
 	/* amdkfd interface */
 	struct kfd_dev          *kfd;
 
-	struct amdgpu_virtualization virtualization;
+	struct amdgpu_virt	virt;
 
 	/* link all shadow bo */
 	struct list_head		shadow_list;
@@ -1575,6 +1588,37 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->count_dw--;
 }
 
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
+{
+	unsigned occupied, chunk1, chunk2;
+	void *dst;
+
+	if (ring->count_dw < count_dw) {
+		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+	} else {
+		occupied = ring->wptr & ring->ptr_mask;
+		dst = (void *)&ring->ring[occupied];
+		chunk1 = ring->ptr_mask + 1 - occupied;
+		chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
+		chunk2 = count_dw - chunk1;
+		chunk1 <<= 2;
+		chunk2 <<= 2;
+
+		if (chunk1)
+			memcpy(dst, src, chunk1);
+
+		if (chunk2) {
+			src += chunk1;
+			dst = (void *)ring->ring;
+			memcpy(dst, src, chunk2);
+		}
+
+		ring->wptr += count_dw;
+		ring->wptr &= ring->ptr_mask;
+		ring->count_dw -= count_dw;
+	}
+}
+
 static inline struct amdgpu_sdma_instance *
 amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
@@ -1604,7 +1648,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
-#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
@@ -1626,6 +1669,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -672,12 +672,10 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
 
 		if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
 		    enc->enc_priv) {
-			if (adev->is_atom_bios) {
-				struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
-				if (dig->bl_dev) {
-					atif->encoder_for_bl = enc;
-					break;
-				}
+			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+			if (dig->bl_dev) {
+				atif->encoder_for_bl = enc;
+				break;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -42,6 +42,51 @@
 #define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
+#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
+
+/* Check if current bios is an ATOM BIOS.
+ * Return true if it is ATOM BIOS. Otherwise, return false.
+ */
+static bool check_atom_bios(uint8_t *bios, size_t size)
+{
+	uint16_t tmp, bios_header_start;
+
+	if (!bios || size < 0x49) {
+		DRM_INFO("vbios mem is null or mem size is wrong\n");
+		return false;
+	}
+
+	if (!AMD_IS_VALID_VBIOS(bios)) {
+		DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+		return false;
+	}
+
+	tmp = bios[0x18] | (bios[0x19] << 8);
+	if (bios[tmp + 0x14] != 0x0) {
+		DRM_INFO("Not an x86 BIOS ROM\n");
+		return false;
+	}
+
+	bios_header_start = bios[0x48] | (bios[0x49] << 8);
+	if (!bios_header_start) {
+		DRM_INFO("Can't locate bios header\n");
+		return false;
+	}
+
+	tmp = bios_header_start + 4;
+	if (size < tmp) {
+		DRM_INFO("BIOS header is broken\n");
+		return false;
+	}
+
+	if (!memcmp(bios + tmp, "ATOM", 4) ||
+	    !memcmp(bios + tmp, "MOTA", 4)) {
+		DRM_DEBUG("ATOMBIOS detected\n");
+		return true;
+	}
+
+	return false;
+}
+
 
 /* If you boot an IGP board with a discrete card as the primary,
  * the IGP rom is not accessible via the rom bar as the IGP rom is
  * part of the system bios.  On boot, the system bios puts a
@@ -65,10 +110,6 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
-		iounmap(bios);
-		return false;
-	}
 	adev->bios = kmalloc(size, GFP_KERNEL);
 	if (!adev->bios) {
 		iounmap(bios);
@@ -77,12 +118,18 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 	adev->bios_size = size;
 	memcpy_fromio(adev->bios, bios, size);
 	iounmap(bios);
+
+	if (!check_atom_bios(adev->bios, size)) {
+		kfree(adev->bios);
+		return false;
+	}
+
 	return true;
 }
 
 bool amdgpu_read_bios(struct amdgpu_device *adev)
 {
-	uint8_t __iomem *bios, val[2];
+	uint8_t __iomem *bios;
 	size_t size;
 
 	adev->bios = NULL;
@@ -92,13 +139,6 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
 		return false;
 	}
 
-	val[0] = readb(&bios[0]);
-	val[1] = readb(&bios[1]);
-
-	if (size == 0 || !AMD_IS_VALID_VBIOS(val)) {
-		pci_unmap_rom(adev->pdev, bios);
-		return false;
-	}
 	adev->bios = kzalloc(size, GFP_KERNEL);
 	if (adev->bios == NULL) {
 		pci_unmap_rom(adev->pdev, bios);
@@ -107,6 +147,12 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
 	adev->bios_size = size;
 	memcpy_fromio(adev->bios, bios, size);
 	pci_unmap_rom(adev->pdev, bios);
+
+	if (!check_atom_bios(adev->bios, size)) {
+		kfree(adev->bios);
+		return false;
+	}
+
 	return true;
 }
 
@@ -140,7 +186,14 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
 	adev->bios_size = len;
 
 	/* read complete BIOS */
-	return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
+	amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
+
+	if (!check_atom_bios(adev->bios, len)) {
+		kfree(adev->bios);
+		return false;
+	}
+
+	return true;
 }
 
 static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
@@ -155,13 +208,17 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
-		return false;
-	}
-	adev->bios = kmemdup(bios, size, GFP_KERNEL);
-	if (adev->bios == NULL) {
-		return false;
-	}
+	adev->bios = kzalloc(size, GFP_KERNEL);
+	if (adev->bios == NULL)
+		return false;
+
+	memcpy_fromio(adev->bios, bios, size);
+
+	if (!check_atom_bios(adev->bios, size)) {
+		kfree(adev->bios);
+		return false;
+	}
+
 	adev->bios_size = size;
 
 	return true;
@@ -273,7 +330,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 		break;
 	}
 
-	if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) {
+	if (!check_atom_bios(adev->bios, size)) {
 		kfree(adev->bios);
 		return false;
 	}
@@ -298,53 +355,59 @@ static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 #ifdef CONFIG_ACPI
 static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
 {
-	bool ret = false;
 	struct acpi_table_header *hdr;
 	acpi_size tbl_size;
 	UEFI_ACPI_VFCT *vfct;
-	GOP_VBIOS_CONTENT *vbios;
-	VFCT_IMAGE_HEADER *vhdr;
+	unsigned offset;
 
 	if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
 		return false;
 	tbl_size = hdr->length;
 	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
 		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
-		goto out_unmap;
+		return false;
 	}
 
 	vfct = (UEFI_ACPI_VFCT *)hdr;
-	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
-		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
-		goto out_unmap;
-	}
+	offset = vfct->VBIOSImageOffset;
 
-	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
-	vhdr = &vbios->VbiosHeader;
-	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
-		 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
-		 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+	while (offset < tbl_size) {
+		GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
+		VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
 
-	if (vhdr->PCIBus != adev->pdev->bus->number ||
-	    vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
-	    vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
-	    vhdr->VendorID != adev->pdev->vendor ||
-	    vhdr->DeviceID != adev->pdev->device) {
-		DRM_INFO("ACPI VFCT table is not for this card\n");
-		goto out_unmap;
-	}
+		offset += sizeof(VFCT_IMAGE_HEADER);
+		if (offset > tbl_size) {
+			DRM_ERROR("ACPI VFCT image header truncated\n");
+			return false;
+		}
 
-	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
-		DRM_ERROR("ACPI VFCT image truncated\n");
-		goto out_unmap;
-	}
+		offset += vhdr->ImageLength;
+		if (offset > tbl_size) {
+			DRM_ERROR("ACPI VFCT image truncated\n");
+			return false;
+		}
 
-	adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
-	adev->bios_size = vhdr->ImageLength;
-	ret = !!adev->bios;
+		if (vhdr->ImageLength &&
+		    vhdr->PCIBus == adev->pdev->bus->number &&
+		    vhdr->PCIDevice == PCI_SLOT(adev->pdev->devfn) &&
+		    vhdr->PCIFunction == PCI_FUNC(adev->pdev->devfn) &&
+		    vhdr->VendorID == adev->pdev->vendor &&
+		    vhdr->DeviceID == adev->pdev->device) {
+			adev->bios = kmemdup(&vbios->VbiosContent,
+					     vhdr->ImageLength,
+					     GFP_KERNEL);
+
+			if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
+				kfree(adev->bios);
+				return false;
+			}
+			adev->bios_size = vhdr->ImageLength;
+			return true;
+		}
+	}
 
-out_unmap:
-	return ret;
+	DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+	return false;
 }
 #else
 static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
@@ -355,57 +418,27 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
 
 bool amdgpu_get_bios(struct amdgpu_device *adev)
 {
-	bool r;
-	uint16_t tmp, bios_header_start;
+	if (amdgpu_atrm_get_bios(adev))
+		return true;
 
-	r = amdgpu_atrm_get_bios(adev);
-	if (!r)
-		r = amdgpu_acpi_vfct_bios(adev);
-	if (!r)
-		r = igp_read_bios_from_vram(adev);
-	if (!r)
-		r = amdgpu_read_bios(adev);
-	if (!r) {
-		r = amdgpu_read_bios_from_rom(adev);
-	}
-	if (!r) {
-		r = amdgpu_read_disabled_bios(adev);
-	}
-	if (!r) {
-		r = amdgpu_read_platform_bios(adev);
-	}
-	if (!r || adev->bios == NULL) {
-		DRM_ERROR("Unable to locate a BIOS ROM\n");
-		adev->bios = NULL;
-		return false;
-	}
-	if (!AMD_IS_VALID_VBIOS(adev->bios)) {
-		printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
-		goto free_bios;
-	}
+	if (amdgpu_acpi_vfct_bios(adev))
+		return true;
 
-	tmp = RBIOS16(0x18);
-	if (RBIOS8(tmp + 0x14) != 0x0) {
-		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
-		goto free_bios;
-	}
+	if (igp_read_bios_from_vram(adev))
+		return true;
 
-	bios_header_start = RBIOS16(0x48);
-	if (!bios_header_start) {
-		goto free_bios;
-	}
-	tmp = bios_header_start + 4;
-	if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
-	    !memcmp(adev->bios + tmp, "MOTA", 4)) {
-		adev->is_atom_bios = true;
-	} else {
-		adev->is_atom_bios = false;
-	}
+	if (amdgpu_read_bios(adev))
+		return true;
 
-	DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
-	return true;
-free_bios:
-	kfree(adev->bios);
-	adev->bios = NULL;
-	return false;
+	if (amdgpu_read_bios_from_rom(adev))
+		return true;
+
+	if (amdgpu_read_disabled_bios(adev))
+		return true;
+
+	if (amdgpu_read_platform_bios(adev))
+		return true;
+
+	DRM_ERROR("Unable to locate a BIOS ROM\n");
+	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -713,6 +713,7 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
 	CGS_FUNC_ADEV;
 	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
 		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
 		return 0;
 	}
 	/* cannot release other firmware because they are not created by cgs */
@@ -762,6 +763,23 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
 	return fw_version;
 }
 
+static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
+					bool en)
+{
+	CGS_FUNC_ADEV;
+
+	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
+		adev->gfx.rlc.funcs->exit_safe_mode == NULL)
+		return 0;
+
+	if (en)
+		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	else
+		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+
+	return 0;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -808,6 +826,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		const uint8_t *src;
 		const struct smc_firmware_header_v1_0 *hdr;
 
+		if (CGS_UCODE_ID_SMU_SK == type)
+			amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU);
+
 		if (!adev->pm.fw) {
 			switch (adev->asic_type) {
 			case CHIP_TOPAZ:
@@ -1200,51 +1221,52 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
 }
 
 static const struct cgs_ops amdgpu_cgs_ops = {
-	amdgpu_cgs_gpu_mem_info,
-	amdgpu_cgs_gmap_kmem,
-	amdgpu_cgs_gunmap_kmem,
-	amdgpu_cgs_alloc_gpu_mem,
-	amdgpu_cgs_free_gpu_mem,
-	amdgpu_cgs_gmap_gpu_mem,
-	amdgpu_cgs_gunmap_gpu_mem,
-	amdgpu_cgs_kmap_gpu_mem,
-	amdgpu_cgs_kunmap_gpu_mem,
-	amdgpu_cgs_read_register,
-	amdgpu_cgs_write_register,
-	amdgpu_cgs_read_ind_register,
-	amdgpu_cgs_write_ind_register,
-	amdgpu_cgs_read_pci_config_byte,
-	amdgpu_cgs_read_pci_config_word,
-	amdgpu_cgs_read_pci_config_dword,
-	amdgpu_cgs_write_pci_config_byte,
-	amdgpu_cgs_write_pci_config_word,
-	amdgpu_cgs_write_pci_config_dword,
-	amdgpu_cgs_get_pci_resource,
-	amdgpu_cgs_atom_get_data_table,
-	amdgpu_cgs_atom_get_cmd_table_revs,
-	amdgpu_cgs_atom_exec_cmd_table,
-	amdgpu_cgs_create_pm_request,
-	amdgpu_cgs_destroy_pm_request,
-	amdgpu_cgs_set_pm_request,
-	amdgpu_cgs_pm_request_clock,
-	amdgpu_cgs_pm_request_engine,
-	amdgpu_cgs_pm_query_clock_limits,
-	amdgpu_cgs_set_camera_voltages,
-	amdgpu_cgs_get_firmware_info,
-	amdgpu_cgs_rel_firmware,
-	amdgpu_cgs_set_powergating_state,
-	amdgpu_cgs_set_clockgating_state,
-	amdgpu_cgs_get_active_displays_info,
-	amdgpu_cgs_notify_dpm_enabled,
-	amdgpu_cgs_call_acpi_method,
-	amdgpu_cgs_query_system_info,
-	amdgpu_cgs_is_virtualization_enabled
+	.gpu_mem_info = amdgpu_cgs_gpu_mem_info,
+	.gmap_kmem = amdgpu_cgs_gmap_kmem,
+	.gunmap_kmem = amdgpu_cgs_gunmap_kmem,
+	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
+	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
+	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
+	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
+	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
+	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
+	.read_register = amdgpu_cgs_read_register,
+	.write_register = amdgpu_cgs_write_register,
+	.read_ind_register = amdgpu_cgs_read_ind_register,
+	.write_ind_register = amdgpu_cgs_write_ind_register,
+	.read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
+	.read_pci_config_word = amdgpu_cgs_read_pci_config_word,
+	.read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
+	.write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
+	.write_pci_config_word = amdgpu_cgs_write_pci_config_word,
+	.write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
+	.get_pci_resource = amdgpu_cgs_get_pci_resource,
+	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
+	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
+	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
+	.create_pm_request = amdgpu_cgs_create_pm_request,
+	.destroy_pm_request = amdgpu_cgs_destroy_pm_request,
+	.set_pm_request = amdgpu_cgs_set_pm_request,
+	.pm_request_clock = amdgpu_cgs_pm_request_clock,
+	.pm_request_engine = amdgpu_cgs_pm_request_engine,
+	.pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
+	.set_camera_voltages = amdgpu_cgs_set_camera_voltages,
+	.get_firmware_info = amdgpu_cgs_get_firmware_info,
+	.rel_firmware = amdgpu_cgs_rel_firmware,
+	.set_powergating_state = amdgpu_cgs_set_powergating_state,
+	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
+	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
+	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
+	.call_acpi_method = amdgpu_cgs_call_acpi_method,
+	.query_system_info = amdgpu_cgs_query_system_info,
+	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
+	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-	amdgpu_cgs_add_irq_source,
-	amdgpu_cgs_irq_get,
-	amdgpu_cgs_irq_put
+	.add_irq_source = amdgpu_cgs_add_irq_source,
+	.irq_get = amdgpu_cgs_irq_get,
+	.irq_put = amdgpu_cgs_irq_put
 };
 
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -75,10 +75,10 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		*out_ring = &adev->uvd.ring;
 		break;
 	case AMDGPU_HW_IP_VCE:
-		if (ring < 2){
+		if (ring < adev->vce.num_rings){
 			*out_ring = &adev->vce.ring[ring];
 		} else {
-			DRM_ERROR("only two VCE rings are supported\n");
+			DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
 			return -EINVAL;
 		}
 		break;
@@ -771,6 +771,20 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
 
+	if (amdgpu_sriov_vf(adev)) {
+		struct dma_fence *f;
+		bo_va = vm->csa_bo_va;
+		BUG_ON(!bo_va);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		if (r)
+			return r;
+
+		f = bo_va->last_pt_update;
+		r = amdgpu_sync_fence(adev, &p->job->sync, f);
+		if (r)
+			return r;
+	}
+
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
 			struct dma_fence *f;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -94,6 +94,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;
 
+	if (amdgpu_sriov_runtime(adev)) {
+		BUG_ON(in_interrupt());
+		return amdgpu_virt_kiq_rreg(adev, reg);
+	}
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -113,6 +118,11 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+	if (amdgpu_sriov_runtime(adev)) {
+		BUG_ON(in_interrupt());
+		return amdgpu_virt_kiq_wreg(adev, reg, v);
+	}
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -885,7 +895,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
 		atom_card_info->ioreg_read = cail_ioreg_read;
 		atom_card_info->ioreg_write = cail_ioreg_write;
 	} else {
-		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+		DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
 		atom_card_info->ioreg_read = cail_reg_read;
 		atom_card_info->ioreg_write = cail_reg_write;
 	}
@@ -1131,6 +1141,18 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	return r;
 }
 
+void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+{
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
+			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+	}
+}
+
 int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 			 enum amd_ip_block_type block_type)
 {
@@ -1235,7 +1257,8 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 		pciaddstr_tmp = pciaddstr;
 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
 			pciaddname = strsep(&pciaddname_tmp, ",");
-			if (!strcmp(pci_address_name, pciaddname)) {
+			if (!strcmp("all", pciaddname)
+			    || !strcmp(pci_address_name, pciaddname)) {
 				long num_crtc;
 				int res = -1;
 
@@ -1323,6 +1346,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_virt_request_full_gpu(adev, true);
+		if (r)
+			return r;
+	}
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d\n", i);
@@ -1383,6 +1412,15 @@ static int amdgpu_init(struct amdgpu_device *adev)
 				return r;
 			}
 			adev->ip_blocks[i].status.hw = true;
+
+			/* right after GMC hw init, we create CSA */
+			if (amdgpu_sriov_vf(adev)) {
+				r = amdgpu_allocate_static_csa(adev);
+				if (r) {
+					DRM_ERROR("allocate CSA failed %d\n", r);
+					return r;
+				}
+			}
 		}
 	}
 
@@ -1516,6 +1554,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.late_initialized = false;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+		amdgpu_virt_release_full_gpu(adev, false);
+	}
+
 	return 0;
 }
 
@@ -1523,6 +1566,9 @@ int amdgpu_suspend(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	/* ungate SMC block first */
 	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 					 AMD_CG_STATE_UNGATE);
@@ -1551,6 +1597,9 @@ int amdgpu_suspend(struct amdgpu_device *adev)
 		}
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, false);
+
 	return 0;
 }
 
@@ -1575,7 +1624,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
 	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
-		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+		adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 }
 
 /**
@@ -1605,7 +1654,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->pdev = pdev;
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
-	adev->is_atom_bios = false;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	adev->mc.gtt_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
@@ -1695,7 +1743,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 	}
 	if (adev->rio_mem == NULL)
-		DRM_ERROR("Unable to find PCI I/O BAR\n");
+		DRM_INFO("PCI I/O BAR is not found.\n");
 
 	/* early init functions */
 	r = amdgpu_early_init(adev);
@@ -1720,12 +1768,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		r = -EINVAL;
 		goto failed;
 	}
-	/* Must be an ATOMBIOS */
-	if (!adev->is_atom_bios) {
-		dev_err(adev->dev, "Expecting atombios for GPU\n");
-		r = -EINVAL;
-		goto failed;
-	}
+
 	r = amdgpu_atombios_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
@@ -2249,6 +2292,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	int resched;
 	bool need_full_reset;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	if (!amdgpu_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
@@ -2837,7 +2883,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 		return -ENOMEM;
 
 	/* version, increment each time something is added */
-	config[no_regs++] = 2;
+	config[no_regs++] = 3;
 	config[no_regs++] = adev->gfx.config.max_shader_engines;
 	config[no_regs++] = adev->gfx.config.max_tile_pipes;
 	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2871,6 +2917,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 	config[no_regs++] = adev->family;
 	config[no_regs++] = adev->external_rev_id;
 
+	/* rev==3 */
+	config[no_regs++] = adev->pdev->device;
+	config[no_regs++] = adev->pdev->revision;
+	config[no_regs++] = adev->pdev->subsystem_device;
+	config[no_regs++] = adev->pdev->subsystem_vendor;
+
 	while (size && (*pos < no_regs * 4)) {
 		uint32_t value;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -138,10 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags, uint32_t target)
+static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
+{
+	int i;
+
+	amdgpu_bo_unref(&work->old_abo);
+	dma_fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		dma_fence_put(work->shared[i]);
+	kfree(work->shared);
+	kfree(work);
+}
+
+static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
+					  struct amdgpu_bo *new_abo)
+{
+	amdgpu_bo_unreserve(new_abo);
+	amdgpu_flip_work_cleanup(work);
+}
+
+static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
+				      struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+		DRM_ERROR("failed to unpin new abo in error path\n");
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+}
+
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+				  struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
+		amdgpu_flip_work_cleanup(work);
+		return;
+	}
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+}
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct drm_pending_vblank_event *event,
+			     uint32_t page_flip_flags,
+			     uint32_t target,
+			     struct amdgpu_flip_work **work_p,
+			     struct amdgpu_bo **new_abo_p)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -154,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int i, r;
+	int r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -189,7 +231,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 
 	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
 	if (unlikely(r != 0)) {
-		r = -EINVAL;
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
 	}
@@ -216,41 +257,79 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
 		goto pflip_cleanup;
-
 	}
 
-	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
-	amdgpu_crtc->pflip_works = work;
-
-
-	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
-					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
-	/* update crtc fb */
-	crtc->primary->fb = fb;
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work.work);
+	*work_p = work;
+	*new_abo_p = new_abo;
 
 	return 0;
 
 pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
-		DRM_ERROR("failed to reserve new abo in error path\n");
-		goto cleanup;
-	}
+	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
+	return r;
 
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-		DRM_ERROR("failed to unpin new abo in error path\n");
-	}
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+	return r;
 
 unreserve:
-	amdgpu_bo_unreserve(new_abo);
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+	return r;
 
 cleanup:
-	amdgpu_bo_unref(&work->old_abo);
-	dma_fence_put(work->excl);
-	for (i = 0; i < work->shared_count; ++i)
-		dma_fence_put(work->shared[i]);
-	kfree(work->shared);
-	kfree(work);
-
+	amdgpu_flip_work_cleanup(work);
 	return r;
 }
 
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct amdgpu_flip_work *work,
+			     struct amdgpu_bo *new_abo)
+{
+	unsigned long flags;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
+	amdgpu_crtc->pflip_works = work;
+
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER(
+		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+		amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+
+	amdgpu_flip_work_func(&work->flip_work.work);
+}
+
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags,
+				 uint32_t target)
+{
+	struct amdgpu_bo *new_abo;
+	struct amdgpu_flip_work *work;
+	int r;
+
+	r = amdgpu_crtc_prepare_flip(crtc,
+				     fb,
+				     event,
+				     page_flip_flags,
+				     target,
+				     &work,
+				     &new_abo);
+	if (r)
+		return r;
+
+	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+
+	return 0;
+}
 
 int amdgpu_crtc_set_config(struct drm_mode_set *set)
@@ -582,12 +661,10 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
 {
 	int sz;
 
-	if (adev->is_atom_bios) {
-		adev->mode_info.coherent_mode_property =
-			drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
-		if (!adev->mode_info.coherent_mode_property)
-			return -ENOMEM;
-	}
+	adev->mode_info.coherent_mode_property =
+		drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+	if (!adev->mode_info.coherent_mode_property)
+		return -ENOMEM;
 
 	adev->mode_info.load_detect_property =
 		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,13 +241,6 @@ enum amdgpu_pcie_gen {
 	AMDGPU_PCIE_GEN_INVALID = 0xffff
 };
 
-enum amdgpu_dpm_forced_level {
-	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
-	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
-	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
-	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
 struct amdgpu_dpm_funcs {
 	int (*get_temperature)(struct amdgpu_device *adev);
 	int (*pre_set_power_state)(struct amdgpu_device *adev);
@@ -258,7 +251,7 @@ struct amdgpu_dpm_funcs {
 	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
 	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
 	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+	int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
 	bool (*vblank_too_short)(struct amdgpu_device *adev);
 	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
 	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
@@ -353,9 +346,6 @@ struct amdgpu_dpm_funcs {
 #define amdgpu_dpm_get_current_power_state(adev) \
 	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
 
-#define amdgpu_dpm_get_performance_level(adev) \
-	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
 #define amdgpu_dpm_get_pp_num_states(adev, data) \
 	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
 
@@ -393,6 +383,11 @@ struct amdgpu_dpm_funcs {
 	 (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
 	 (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
 
+#define amdgpu_dpm_get_performance_level(adev) \
+	((adev)->pp_enabled ?						\
+	 (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
+	 (adev)->pm.dpm.forced_level)
+
 struct amdgpu_dpm {
 	struct amdgpu_ps        *ps;
 	/* number of valid power states */
@@ -440,7 +435,7 @@ struct amdgpu_dpm {
 	/* thermal handling */
 	struct amdgpu_dpm_thermal thermal;
 	/* forced levels */
-	enum amdgpu_dpm_forced_level forced_level;
+	enum amd_dpm_forced_level forced_level;
 };
 
 struct amdgpu_pm {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,6 @@ int amdgpu_vram_page_split = 1024;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_powerplay = -1;
 int amdgpu_no_evict = 0;
 int amdgpu_direct_gma_size = 0;
 unsigned amdgpu_pcie_gen_cap = 0;
@@ -179,9 +178,6 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
-module_param_named(powerplay, amdgpu_powerplay, int, 0444);
-
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -471,12 +471,15 @@ out:
 
 static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
 {
-	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
 	/* if anything is swapped out don't swap it in here,
 	   just abort and wait for the next CS */
+	if (!amdgpu_bo_gpu_accessible(bo))
+		return -ERESTARTSYS;
 
-	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
+	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
+		return -ERESTARTSYS;
+
+	return 0;
 }
 
 /**
@@ -496,7 +499,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ww_acquire_ctx ticket;
 	struct list_head list, duplicates;
-	unsigned domain;
 	int r;
 
 	INIT_LIST_HEAD(&list);
@@ -514,12 +516,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		goto error_print;
 
 	list_for_each_entry(entry, &list, head) {
-		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+		struct amdgpu_bo *bo =
+			container_of(entry->bo, struct amdgpu_bo, tbo);
+
 		/* if anything is swapped out don't swap it in here,
 		   just abort and wait for the next CS */
-		if (domain == AMDGPU_GEM_DOMAIN_CPU)
+		if (!amdgpu_bo_gpu_accessible(bo))
+			goto error_unreserve;
+
+		if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
 			goto error_unreserve;
 	}
 
 	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
 				      NULL);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -42,12 +42,12 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
 {
 	int i;
 
-	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
-		if (adev->gfx.scratch.free[i]) {
-			adev->gfx.scratch.free[i] = false;
-			*reg = adev->gfx.scratch.reg[i];
-			return 0;
-		}
+	i = ffs(adev->gfx.scratch.free_mask);
+	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
+		i--;
+		adev->gfx.scratch.free_mask &= ~(1u << i);
+		*reg = adev->gfx.scratch.reg_base + i;
+		return 0;
 	}
 	return -EINVAL;
 }
@@ -62,14 +62,7 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
 */
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
 {
-	int i;
-
-	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
-		if (adev->gfx.scratch.reg[i] == reg) {
-			adev->gfx.scratch.free[i] = true;
-			return;
-		}
-	}
+	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -243,9 +243,9 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
-	amdgpu_gtt_mgr_init,
-	amdgpu_gtt_mgr_fini,
-	amdgpu_gtt_mgr_new,
-	amdgpu_gtt_mgr_del,
-	amdgpu_gtt_mgr_debug
+	.init = amdgpu_gtt_mgr_init,
+	.takedown = amdgpu_gtt_mgr_fini,
+	.get_node = amdgpu_gtt_mgr_new,
+	.put_node = amdgpu_gtt_mgr_del,
+	.debug = amdgpu_gtt_mgr_debug
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -231,8 +231,7 @@ void amdgpu_i2c_init(struct amdgpu_device *adev)
 	if (amdgpu_hw_i2c)
 		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
 
-	if (adev->is_atom_bios)
-		amdgpu_atombios_i2c_init(adev);
+	amdgpu_atombios_i2c_init(adev);
 }
 
 /* remove all the buses */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
  * to SI there was just a DE IB.
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
-		       struct amdgpu_job *job, struct dma_fence **f)
+		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
+		       struct dma_fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
@@ -175,15 +175,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->emit_hdp_flush)
 		amdgpu_ring_emit_hdp_flush(ring);
 
+	/* always set cond_exec_polling to CONTINUE */
+	*ring->cond_exe_cpu_addr = 1;
+
 	skip_preamble = ring->current_ctx == fence_ctx;
 	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (job && ring->funcs->emit_cntxcntl) {
 		if (need_ctx_switch)
 			status |= AMDGPU_HAVE_CTX_SWITCH;
 		status |= job->preamble_status;
+
+		if (vm)
+			status |= AMDGPU_VM_DOMAIN;
 		amdgpu_ring_emit_cntxcntl(ring, status);
 	}
 
@@ -193,7 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
 			skip_preamble &&
-			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
+			!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
+			!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
 		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -223,7 +224,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
 	ring->current_ctx = fence_ctx;
-	if (ring->funcs->emit_switch_buffer)
+	if (vm && ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
 	amdgpu_ring_commit(ring);
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -170,8 +170,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
-	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
-			       job->sync.last_vm_update, job, &fence);
+	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
 	if (r)
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,6 +60,9 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		goto done_free;
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
@@ -138,6 +141,9 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, true);
+
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -569,6 +575,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			return -EINVAL;
 		}
 	}
+	case AMDGPU_INFO_NUM_HANDLES: {
+		struct drm_amdgpu_info_num_handles handle;
+
+		switch (info->query_hw_ip.type) {
+		case AMDGPU_HW_IP_UVD:
+			/* Starting Polaris, we support unlimited UVD handles */
+			if (adev->asic_type < CHIP_POLARIS10) {
+				handle.uvd_max_handles = adev->uvd.max_handles;
+				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
+
+				return copy_to_user(out, &handle,
+					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
+			} else {
+				return -ENODATA;
+			}
+
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
@@ -628,6 +655,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_suspend;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		r = amdgpu_map_static_csa(adev, &fpriv->vm);
+		if (r)
+			goto out_suspend;
+	}
+
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
 
@@ -666,6 +699,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	amdgpu_uvd_free_handles(adev, file_priv);
 	amdgpu_vce_free_handles(adev, file_priv);
 
+	if (amdgpu_sriov_vf(adev)) {
+		/* TODO: how to handle reserve failure */
+		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
+		fpriv->vm.csa_bo_va = NULL;
+		amdgpu_bo_unreserve(adev->virt.csa_obj);
+	}
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -595,6 +595,21 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_pending_vblank_event *event,
 				 uint32_t page_flip_flags, uint32_t target);
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+				  struct amdgpu_bo *new_abo);
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct drm_pending_vblank_event *event,
+			     uint32_t page_flip_flags,
+			     uint32_t target,
+			     struct amdgpu_flip_work **work,
+			     struct amdgpu_bo **new_abo);
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct amdgpu_flip_work *work,
+			     struct amdgpu_bo *new_abo);
+
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
@ -363,11 +363,31 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
    bo->flags = flags;

#ifdef CONFIG_X86_32
    /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
     * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
     */
    bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
    /* Don't try to enable write-combining when it can't work, or things
     * may be slow
     * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
     */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining

    if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
        DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                      "better performance thanks to write-combining\n");
    bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
    /* For architectures that don't support WC memory,
     * mask out the WC flag from the BO
     */
    if (!drm_arch_can_wc_memory())
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

    amdgpu_fill_placement_to_bo(bo, placement);
    /* Kernel allocations are uninterruptible */

@ -386,6 +406,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
    if (unlikely(r != 0))
        return r;

    bo->tbo.priority = ilog2(bo->tbo.num_pages);
    if (kernel)
        bo->tbo.priority *= 2;
    bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1));

    if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
        bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
        struct dma_fence *fence;

@ -408,7 +433,8 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
    return 0;

fail_unreserve:
    ww_mutex_unlock(&bo->tbo.resv->lock);
    if (!resv)
        ww_mutex_unlock(&bo->tbo.resv->lock);
    amdgpu_bo_unref(&bo);
    return r;
}

@ -472,7 +498,16 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;

    if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
        if (!resv) {
            r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
            WARN_ON(r != 0);
        }

        r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

        if (!resv)
            ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);

        if (r)
            amdgpu_bo_unref(bo_ptr);
    }

@ -849,6 +884,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

@ -861,6 +897,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
    abo = container_of(bo, struct amdgpu_bo, tbo);
    amdgpu_vm_bo_invalidate(adev, abo);

    /* remember the eviction */
    if (evict)
        atomic64_inc(&adev->num_evictions);

    /* update statistics */
    if (!new_mem)
        return;
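The priority logic in the hunk above maps a BO's size to a TTM eviction priority: ilog2 of the page count, doubled for kernel BOs, clamped to the last valid priority so large and kernel-owned buffers are evicted last. A minimal user-space model of just that arithmetic; the TTM_MAX_BO_PRIORITY value here is illustrative, not taken from this diff:

    #include <stdio.h>

    #define TTM_MAX_BO_PRIORITY 4u  /* illustrative value only */

    /* stand-in for the kernel's ilog2() */
    static unsigned int ilog2_u(unsigned long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    static unsigned int bo_priority(unsigned long num_pages, int kernel)
    {
        unsigned int prio = ilog2_u(num_pages);

        if (kernel)
            prio *= 2;  /* kernel BOs land on later-evicted LRUs */
        return prio < TTM_MAX_BO_PRIORITY - 1 ? prio : TTM_MAX_BO_PRIORITY - 1;
    }

    int main(void)
    {
        printf("%u %u %u\n", bo_priority(1, 0), bo_priority(256, 0),
               bo_priority(2, 1));  /* prints: 0 3 2 */
        return 0;
    }
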
@ -114,6 +114,15 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
    return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
    return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
}

int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,

@ -155,7 +164,8 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem);
                           bool evict,
                           struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);

@ -34,6 +34,28 @@
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

static const struct cg_flag_name clocks[] = {
    {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
    {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
    {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
    {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
    {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
    {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
    {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
    {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
    {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
    {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
    {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
    {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
    {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
    {0, NULL},
};

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
    if (adev->pp_enabled)

@ -112,28 +134,23 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;
    enum amd_dpm_forced_level level;

    if ((adev->flags & AMD_IS_PX) &&
        (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
        return snprintf(buf, PAGE_SIZE, "off\n");

    if (adev->pp_enabled) {
        enum amd_dpm_forced_level level;

        level = amdgpu_dpm_get_performance_level(adev);
        return snprintf(buf, PAGE_SIZE, "%s\n",
            (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
            (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
            (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
            (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
    } else {
        enum amdgpu_dpm_forced_level level;

        level = adev->pm.dpm.forced_level;
        return snprintf(buf, PAGE_SIZE, "%s\n",
            (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
            (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
    }
    level = amdgpu_dpm_get_performance_level(adev);
    return snprintf(buf, PAGE_SIZE, "%s\n",
        (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
        (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
        (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
        (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
        (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
        "unknown");
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,

@ -143,7 +160,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;
    enum amdgpu_dpm_forced_level level;
    enum amd_dpm_forced_level level;
    enum amd_dpm_forced_level current_level;
    int ret = 0;

    /* Can't force performance level when the card is off */

@ -151,19 +169,34 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
        (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
        return -EINVAL;

    current_level = amdgpu_dpm_get_performance_level(adev);

    if (strncmp("low", buf, strlen("low")) == 0) {
        level = AMDGPU_DPM_FORCED_LEVEL_LOW;
        level = AMD_DPM_FORCED_LEVEL_LOW;
    } else if (strncmp("high", buf, strlen("high")) == 0) {
        level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
        level = AMD_DPM_FORCED_LEVEL_HIGH;
    } else if (strncmp("auto", buf, strlen("auto")) == 0) {
        level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
        level = AMD_DPM_FORCED_LEVEL_AUTO;
    } else if (strncmp("manual", buf, strlen("manual")) == 0) {
        level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
    } else {
        level = AMD_DPM_FORCED_LEVEL_MANUAL;
    } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
        level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
    } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
        level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
    } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
        level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
    } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
        level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
    } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
        level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
    } else {
        count = -EINVAL;
        goto fail;
    }

    if (current_level == level)
        return count;

    if (adev->pp_enabled)
        amdgpu_dpm_force_performance_level(adev, level);
    else {

@ -180,6 +213,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
        adev->pm.dpm.forced_level = level;
        mutex_unlock(&adev->pm.mutex);
    }

fail:
    return count;
}

@ -1060,9 +1094,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)

    if (adev->pm.funcs->force_performance_level) {
        if (adev->pm.dpm.thermal_active) {
            enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
            enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
            /* force low perf level for thermal */
            amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
            amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
            /* save the user's level */
            adev->pm.dpm.forced_level = level;
        } else {

@ -1351,12 +1385,27 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
    return 0;
}

static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
    int i;

    for (i = 0; clocks[i].flag; i++)
        seq_printf(m, "\t%s: %s\n", clocks[i].name,
                   (flags & clocks[i].flag) ? "On" : "Off");
}

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_device *dev = node->minor->dev;
    struct amdgpu_device *adev = dev->dev_private;
    struct drm_device *ddev = adev->ddev;
    u32 flags = 0;

    amdgpu_get_clockgating_state(adev, &flags);
    seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
    amdgpu_parse_cg_state(m, flags);
    seq_printf(m, "\n");

    if (!adev->pm.dpm_enabled) {
        seq_printf(m, "dpm not enabled\n");
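The strncmp chain in amdgpu_set_dpm_forced_performance_level maps a sysfs string onto an enum value by prefix match, which tolerates the trailing newline sysfs delivers. The same mapping can be written table-driven; a user-space sketch with invented enum values, not the driver's:

    #include <stdio.h>
    #include <string.h>

    /* illustrative stand-ins for the amd_dpm_forced_level values */
    enum level { LVL_AUTO, LVL_LOW, LVL_HIGH, LVL_MANUAL,
                 LVL_PROFILE_PEAK, LVL_INVALID };

    static const struct { const char *name; enum level lvl; } level_map[] = {
        { "auto",         LVL_AUTO },
        { "low",          LVL_LOW },
        { "high",         LVL_HIGH },
        { "manual",       LVL_MANUAL },
        { "profile_peak", LVL_PROFILE_PEAK },
    };

    static enum level parse_level(const char *buf)
    {
        size_t i;

        /* prefix match, like the kernel's strncmp chain */
        for (i = 0; i < sizeof(level_map) / sizeof(level_map[0]); i++)
            if (!strncmp(level_map[i].name, buf, strlen(level_map[i].name)))
                return level_map[i].lvl;
        return LVL_INVALID;
    }

    int main(void)
    {
        printf("%d\n", parse_level("profile_peak\n"));  /* 4: newline ignored */
        printf("%d\n", parse_level("bogus"));           /* 5: LVL_INVALID */
        return 0;
    }
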
@ -24,6 +24,12 @@
#ifndef __AMDGPU_PM_H__
#define __AMDGPU_PM_H__

struct cg_flag_name
{
    u32 flag;
    const char *name;
};

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);

@ -34,67 +34,34 @@
#include "cik_dpm.h"
#include "vi_dpm.h"

static int amdgpu_powerplay_init(struct amdgpu_device *adev)
static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
{
    int ret = 0;
    struct amd_pp_init pp_init;
    struct amd_powerplay *amd_pp;
    int ret;

    amd_pp = &(adev->powerplay);

    if (adev->pp_enabled) {
        struct amd_pp_init *pp_init;

        pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);

        if (pp_init == NULL)
            return -ENOMEM;

        pp_init->chip_family = adev->family;
        pp_init->chip_id = adev->asic_type;
        pp_init->device = amdgpu_cgs_create_device(adev);
        ret = amd_powerplay_init(pp_init, amd_pp);
        kfree(pp_init);
    } else {
        amd_pp->pp_handle = (void *)adev;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
        case CHIP_HAINAN:
            amd_pp->ip_funcs = &si_dpm_ip_funcs;
            break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
            amd_pp->ip_funcs = &ci_dpm_ip_funcs;
            break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_KAVERI:
            amd_pp->ip_funcs = &kv_dpm_ip_funcs;
            break;
#endif
        case CHIP_CARRIZO:
        case CHIP_STONEY:
            amd_pp->ip_funcs = &cz_dpm_ip_funcs;
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    return ret;
    pp_init.chip_family = adev->family;
    pp_init.chip_id = adev->asic_type;
    pp_init.pm_en = amdgpu_dpm != 0 ? true : false;
    pp_init.feature_mask = amdgpu_pp_feature_mask;
    pp_init.device = amdgpu_cgs_create_device(adev);
    ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
    if (ret)
        return -EINVAL;
    return 0;
}

static int amdgpu_pp_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amd_powerplay *amd_pp;
    int ret = 0;

    amd_pp = &(adev->powerplay);
    adev->pp_enabled = false;
    amd_pp->pp_handle = (void *)adev;

    switch (adev->asic_type) {
    case CHIP_POLARIS11:
    case CHIP_POLARIS10:

@ -102,30 +69,48 @@ static int amdgpu_pp_early_init(void *handle)
    case CHIP_TONGA:
    case CHIP_FIJI:
    case CHIP_TOPAZ:
        adev->pp_enabled = true;
        break;
    case CHIP_CARRIZO:
    case CHIP_STONEY:
        adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
        adev->pp_enabled = true;
        if (amdgpu_create_pp_handle(adev))
            return -EINVAL;
        amd_pp->ip_funcs = &pp_ip_funcs;
        amd_pp->pp_funcs = &pp_dpm_funcs;
        break;
    /* These chips don't have powerplay implementations */
#ifdef CONFIG_DRM_AMDGPU_SI
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        amd_pp->ip_funcs = &si_dpm_ip_funcs;
        break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
    case CHIP_BONAIRE:
    case CHIP_HAWAII:
        amd_pp->ip_funcs = &ci_dpm_ip_funcs;
        break;
    case CHIP_KABINI:
    case CHIP_MULLINS:
    case CHIP_KAVERI:
        amd_pp->ip_funcs = &kv_dpm_ip_funcs;
        break;
#endif
    default:
        adev->pp_enabled = false;
        ret = -EINVAL;
        break;
    }

    ret = amdgpu_powerplay_init(adev);
    if (ret)
        return ret;

    if (adev->powerplay.ip_funcs->early_init)
        ret = adev->powerplay.ip_funcs->early_init(
                    adev->powerplay.pp_handle);

    if (ret == PP_DPM_DISABLED) {
        adev->pm.dpm_enabled = false;
        return 0;
    }
    return ret;
}

@ -185,6 +170,11 @@ static int amdgpu_pp_hw_init(void *handle)
    ret = adev->powerplay.ip_funcs->hw_init(
                adev->powerplay.pp_handle);

    if (ret == PP_DPM_DISABLED) {
        adev->pm.dpm_enabled = false;
        return 0;
    }

    if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
        adev->pm.dpm_enabled = true;

@ -210,14 +200,14 @@ static void amdgpu_pp_late_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (adev->pp_enabled) {
        amdgpu_pm_sysfs_fini(adev);
        amd_powerplay_fini(adev->powerplay.pp_handle);
    }

    if (adev->powerplay.ip_funcs->late_fini)
        adev->powerplay.ip_funcs->late_fini(
            adev->powerplay.pp_handle);

    if (adev->pp_enabled && adev->pm.dpm_enabled)
        amdgpu_pm_sysfs_fini(adev);

    amd_powerplay_destroy(adev->powerplay.pp_handle);
}

static int amdgpu_pp_suspend(void *handle)
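Both amdgpu_pp_early_init and amdgpu_pp_hw_init above treat PP_DPM_DISABLED as a soft failure: DPM is switched off but device probe continues. A small user-space model of that pattern; the sentinel value is invented for illustration and is not the real powerplay constant:

    #include <stdio.h>

    #define PP_DPM_DISABLED 1  /* illustrative sentinel only */

    /* pretend backend init: 0 = ok, PP_DPM_DISABLED = run without DPM */
    static int backend_init(int dpm_supported)
    {
        return dpm_supported ? 0 : PP_DPM_DISABLED;
    }

    static int pp_init(int dpm_supported, int *dpm_enabled)
    {
        int ret = backend_init(dpm_supported);

        if (ret == PP_DPM_DISABLED) {
            *dpm_enabled = 0;  /* degrade gracefully instead of failing probe */
            return 0;
        }
        *dpm_enabled = 1;
        return ret;
    }

    int main(void)
    {
        int dpm;

        printf("init=%d dpm=%d\n", pp_init(0, &dpm), dpm);  /* init=0 dpm=0 */
        return 0;
    }
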
@ -207,6 +207,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
    }
    ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
    ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
    /* always set cond_exec_polling to CONTINUE */
    *ring->cond_exe_cpu_addr = 1;

    r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
    if (r) {

@ -307,7 +309,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
    while (size) {
        if (*pos >= (ring->ring_size + 12))
            return result;

        value = ring->ring[(*pos - 12)/4];
        r = put_user(value, (uint32_t*)buf);
        if (r)
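cond_exe_gpu_addr and cond_exe_cpu_addr above are two views of the same writeback slot: the GPU sees a bus address, the CPU a kernel pointer, and the slot is primed to 1 (CONTINUE) so conditional execution never skips packets by default. A user-space model of that dual addressing, with invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* model of an adev->wb-style writeback buffer: uint32_t slots,
     * GPU address = base + slot index * 4 */
    struct wb_buf {
        uint64_t gpu_base;
        uint32_t slots[256];
    };

    static void ring_init_cond_exec(struct wb_buf *wb, unsigned int offs,
                                    uint64_t *gpu_addr, uint32_t **cpu_addr)
    {
        *gpu_addr = wb->gpu_base + (uint64_t)offs * 4;
        *cpu_addr = &wb->slots[offs];
        **cpu_addr = 1;  /* CONTINUE: packets after COND_EXEC always run */
    }

    int main(void)
    {
        struct wb_buf wb = { .gpu_base = 0x100000 };
        uint64_t gpu;
        uint32_t *cpu;

        ring_init_cond_exec(&wb, 8, &gpu, &cpu);
        printf("gpu=0x%llx val=%u\n", (unsigned long long)gpu, *cpu);
        /* gpu=0x100020 val=1 */
        return 0;
    }
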
@ -135,6 +135,8 @@ struct amdgpu_ring_funcs {
    void (*end_use)(struct amdgpu_ring *ring);
    void (*emit_switch_buffer) (struct amdgpu_ring *ring);
    void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
    void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
    void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
};

struct amdgpu_ring {

@ -24,7 +24,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
        __entry->reg = reg;
        __entry->value = value;
        ),
    TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
          (unsigned long)__entry->did,
          (unsigned long)__entry->reg,
          (unsigned long)__entry->value)

@ -43,7 +43,7 @@ TRACE_EVENT(amdgpu_mm_wreg,
        __entry->reg = reg;
        __entry->value = value;
        ),
    TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
          (unsigned long)__entry->did,
          (unsigned long)__entry->reg,
          (unsigned long)__entry->value)
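The TP_printk change widens the register field from %04lx to %08lx: offsets above 0xffff still printed fully with %04lx, but without consistent zero padding, which leaves trace logs ragged and harder to post-process. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long reg = 0x12345;  /* a register offset wider than 16 bits */

        printf("%04lx\n", reg);  /* "12345": minimum width exceeded, no alignment */
        printf("%08lx\n", reg);  /* "00012345": fixed-width, column-aligned */
        return 0;
    }
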
@ -466,10 +466,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,

    adev = amdgpu_ttm_adev(bo->bdev);

    /* remember the eviction */
    if (evict)
        atomic64_inc(&adev->num_evictions);

    if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
        amdgpu_move_null(bo, new_mem);
        return 0;

@ -552,6 +548,8 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
        mem->bus.addr =
            ioremap_nocache(mem->bus.base + mem->bus.offset,
                            mem->bus.size);
        if (!mem->bus.addr)
            return -ENOMEM;

        /*
         * Alpha: Use just the bus offset plus

@ -1052,56 +1050,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
    return flags;
}

static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
    unsigned i, j;

    for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
        struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

        for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
            if (&tbo->lru == lru->lru[j])
                lru->lru[j] = tbo->lru.prev;

        if (&tbo->swap == lru->swap_lru)
            lru->swap_lru = tbo->swap.prev;
    }
}

static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
    unsigned log2_size = min(ilog2(tbo->num_pages),
                             AMDGPU_TTM_LRU_SIZE - 1);

    return &adev->mman.log2_size[log2_size];
}

static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
{
    struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
    struct list_head *res = lru->lru[tbo->mem.mem_type];

    lru->lru[tbo->mem.mem_type] = &tbo->lru;
    while ((++lru)->lru[tbo->mem.mem_type] == res)
        lru->lru[tbo->mem.mem_type] = &tbo->lru;

    return res;
}

static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
{
    struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
    struct list_head *res = lru->swap_lru;

    lru->swap_lru = &tbo->swap;
    while ((++lru)->swap_lru == res)
        lru->swap_lru = &tbo->swap;

    return res;
}

static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                                            const struct ttm_place *place)
{

@ -1140,14 +1088,10 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
    .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
    .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
    .io_mem_free = &amdgpu_ttm_io_mem_free,
    .lru_removal = &amdgpu_ttm_lru_removal,
    .lru_tail = &amdgpu_ttm_lru_tail,
    .swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};

int amdgpu_ttm_init(struct amdgpu_device *adev)
{
    unsigned i, j;
    int r;

    r = amdgpu_ttm_global_init(adev);

@ -1165,19 +1109,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
        return r;
    }

    for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
        struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

        for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
            lru->lru[j] = &adev->mman.bdev.man[j].lru;
        lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
    }

    for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
        adev->mman.guard.lru[j] = NULL;
    adev->mman.guard.swap_lru = NULL;

    adev->mman.initialized = true;
    r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
                       adev->mc.real_vram_size >> PAGE_SHIFT);

@ -1365,7 +1296,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
    WARN_ON(job->ibs[0].length_dw > num_dw);
    if (direct_submit) {
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
                               NULL, NULL, fence);
                               NULL, fence);
        job->fence = dma_fence_get(*fence);
        if (r)
            DRM_ERROR("Error scheduling IBs (%d)\n", r);
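The custom LRU removed above bucketed BOs by size: min(ilog2(num_pages), AMDGPU_TTM_LRU_SIZE - 1) picked one of twenty per-size lists, so similarly sized BOs aged together and small BOs could not starve large ones off the tail. The driver-side scheme is gone now that TTM carries priorities itself, but the bucket selection was just this arithmetic, shown here as a standalone model:

    #include <stdio.h>

    #define AMDGPU_TTM_LRU_SIZE 20

    /* stand-in for the kernel's ilog2() */
    static unsigned int ilog2_u(unsigned long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    /* bucket index used by the removed amdgpu_ttm_lru() */
    static unsigned int lru_bucket(unsigned long num_pages)
    {
        unsigned int b = ilog2_u(num_pages);

        return b < AMDGPU_TTM_LRU_SIZE - 1 ? b : AMDGPU_TTM_LRU_SIZE - 1;
    }

    int main(void)
    {
        printf("%u %u %u\n", lru_bucket(1), lru_bucket(4096),
               lru_bucket(1ul << 30));  /* 0 12 19 */
        return 0;
    }
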
@ -34,13 +34,6 @@
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)

#define AMDGPU_TTM_LRU_SIZE 20

struct amdgpu_mman_lru {
    struct list_head *lru[TTM_NUM_MEM_TYPES];
    struct list_head *swap_lru;
};

struct amdgpu_mman {
    struct ttm_bo_global_ref bo_global_ref;
    struct drm_global_reference mem_global_ref;

@ -58,11 +51,6 @@ struct amdgpu_mman {
    struct amdgpu_ring *buffer_funcs_ring;
    /* Scheduler entity for buffer moves */
    struct amd_sched_entity entity;

    /* custom LRU management */
    struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
    /* guard for log2_size array, don't add anything in between */
    struct amdgpu_mman_lru guard;
};

extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;

@ -976,7 +976,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
    ib->length_dw = 16;

    if (direct) {
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
            goto err_free;

@ -1178,3 +1178,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
    return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
    unsigned i;
    uint32_t used_handles = 0;

    for (i = 0; i < adev->uvd.max_handles; ++i) {
        /*
         * Handles can be freed in any order, and not
         * necessarily linear. So we need to count
         * all non-zero handles.
         */
        if (atomic_read(&adev->uvd.handles[i]))
            used_handles++;
    }

    return used_handles;
}
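amdgpu_uvd_used_handles has to scan the whole handle array because decode sessions close in arbitrary order; the used count is the number of non-zero slots, not a high-water mark. A user-space model of the same scan:

    #include <stdio.h>

    /* sessions can be freed in any order, so count every non-zero slot */
    static unsigned int used_handles(const unsigned int *handles,
                                     unsigned int max)
    {
        unsigned int i, used = 0;

        for (i = 0; i < max; i++)
            if (handles[i])
                used++;
        return used;
    }

    int main(void)
    {
        unsigned int h[] = { 7, 0, 3, 0 };  /* slot 1 freed before slot 2 */

        printf("%u\n", used_handles(h, 4));  /* 2 */
        return 0;
    }
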
@ -38,5 +38,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev);

#endif

@ -455,7 +455,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
    for (i = ib->length_dw; i < ib_size_dw; ++i)
        ib->ptr[i] = 0x0;

    r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
    r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
    job->fence = dma_fence_get(f);
    if (r)
        goto err;

@ -518,7 +518,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        ib->ptr[i] = 0x0;

    if (direct) {
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
            goto err;

@ -0,0 +1,222 @@
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
    int r;
    void *ptr;

    r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                &adev->virt.csa_vmid0_addr, &ptr);
    if (r)
        return r;

    memset(ptr, 0, AMDGPU_CSA_SIZE);
    return 0;
}

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init;
 * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * to this VM, and each command submission of GFX should use this virtual
 * address within the META_DATA init package to support SRIOV gfx preemption.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
    int r;
    struct amdgpu_bo_va *bo_va;
    struct ww_acquire_ctx ticket;
    struct list_head list;
    struct amdgpu_bo_list_entry pd;
    struct ttm_validate_buffer csa_tv;

    INIT_LIST_HEAD(&list);
    INIT_LIST_HEAD(&csa_tv.head);
    csa_tv.bo = &adev->virt.csa_obj->tbo;
    csa_tv.shared = true;

    list_add(&csa_tv.head, &list);
    amdgpu_vm_get_pd_bo(vm, &list, &pd);

    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r) {
        DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
        return r;
    }

    bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
    if (!bo_va) {
        ttm_eu_backoff_reservation(&ticket, &list);
        DRM_ERROR("failed to create bo_va for static CSA\n");
        return -ENOMEM;
    }

    r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
                         AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                         AMDGPU_PTE_EXECUTABLE);

    if (r) {
        DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
        amdgpu_vm_bo_rmv(adev, bo_va);
        ttm_eu_backoff_reservation(&ticket, &list);
        kfree(bo_va);
        return r;
    }

    vm->csa_bo_va = bo_va;
    ttm_eu_backoff_reservation(&ticket, &list);
    return 0;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
    /* enable virtual display */
    adev->mode_info.num_crtc = 1;
    adev->enable_virtual_display = true;

    mutex_init(&adev->virt.lock);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
    signed long r;
    uint32_t val;
    struct dma_fence *f;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *ring = &kiq->ring;

    BUG_ON(!ring->funcs->emit_rreg);

    mutex_lock(&adev->virt.lock);
    amdgpu_ring_alloc(ring, 32);
    amdgpu_ring_emit_hdp_flush(ring);
    amdgpu_ring_emit_rreg(ring, reg);
    amdgpu_ring_emit_hdp_invalidate(ring);
    amdgpu_fence_emit(ring, &f);
    amdgpu_ring_commit(ring);
    mutex_unlock(&adev->virt.lock);

    r = dma_fence_wait(f, false);
    if (r)
        DRM_ERROR("wait for kiq fence error: %ld.\n", r);
    dma_fence_put(f);

    val = adev->wb.wb[adev->virt.reg_val_offs];

    return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
    signed long r;
    struct dma_fence *f;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *ring = &kiq->ring;

    BUG_ON(!ring->funcs->emit_wreg);

    mutex_lock(&adev->virt.lock);
    amdgpu_ring_alloc(ring, 32);
    amdgpu_ring_emit_hdp_flush(ring);
    amdgpu_ring_emit_wreg(ring, reg, v);
    amdgpu_ring_emit_hdp_invalidate(ring);
    amdgpu_fence_emit(ring, &f);
    amdgpu_ring_commit(ring);
    mutex_unlock(&adev->virt.lock);

    r = dma_fence_wait(f, false);
    if (r)
        DRM_ERROR("wait for kiq fence error: %ld.\n", r);
    dma_fence_put(f);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @amdgpu: amdgpu device.
 * @init: is driver init time.
 * When the driver starts init/fini, it first needs to request full GPU access.
 * Return: Zero if the request succeeds, otherwise an error.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
    struct amdgpu_virt *virt = &adev->virt;
    int r;

    if (virt->ops && virt->ops->req_full_gpu) {
        r = virt->ops->req_full_gpu(adev, init);
        if (r)
            return r;

        adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
    }

    return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @amdgpu: amdgpu device.
 * @init: is driver init time.
 * When driver init/fini finishes, full GPU access needs to be released.
 * Return: Zero if the release succeeds, otherwise an error.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
    struct amdgpu_virt *virt = &adev->virt;
    int r;

    if (virt->ops && virt->ops->rel_full_gpu) {
        r = virt->ops->rel_full_gpu(adev, init);
        if (r)
            return r;

        adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
    }
    return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @amdgpu: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU the VM is using.
 * Return: Zero if the reset succeeds, otherwise an error.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
    struct amdgpu_virt *virt = &adev->virt;
    int r;

    if (virt->ops && virt->ops->reset_gpu) {
        r = virt->ops->reset_gpu(adev);
        if (r)
            return r;

        adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
    }

    return 0;
}
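The request/release pair above brackets a full-access window: AMDGPU_SRIOV_CAPS_RUNTIME is cleared while the VF holds exclusive access and set again once the GPU is handed back to the hypervisor's scheduler, after which privileged register access has to detour through the KIQ ring. A model of just the flag handling, with the bit position taken from the header below:

    #include <stdint.h>
    #include <stdio.h>

    #define SRIOV_CAPS_RUNTIME (1u << 4)

    /* outside the full-access window the VF is in "runtime" mode
     * and must use KIQ for privileged register access */
    static void request_full_gpu(uint32_t *caps) { *caps &= ~SRIOV_CAPS_RUNTIME; }
    static void release_full_gpu(uint32_t *caps) { *caps |= SRIOV_CAPS_RUNTIME; }

    int main(void)
    {
        uint32_t caps = SRIOV_CAPS_RUNTIME;

        request_full_gpu(&caps);
        printf("runtime=%u\n", !!(caps & SRIOV_CAPS_RUNTIME)); /* 0: direct MMIO ok */
        release_full_gpu(&caps);
        printf("runtime=%u\n", !!(caps & SRIOV_CAPS_RUNTIME)); /* 1: go through KIQ */
        return 0;
    }
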
@ -28,22 +28,48 @@
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
/* GPU virtualization */
struct amdgpu_virtualization {
    uint32_t virtual_caps;
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */

/**
 * struct amdgpu_virt_ops - amdgpu device virt operations
 */
struct amdgpu_virt_ops {
    int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
    int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
    int (*reset_gpu)(struct amdgpu_device *adev);
};

/* GPU virtualization */
struct amdgpu_virt {
    uint32_t caps;
    struct amdgpu_bo *csa_obj;
    uint64_t csa_vmid0_addr;
    bool chained_ib_support;
    uint32_t reg_val_offs;
    struct mutex lock;
    struct amdgpu_irq_src ack_irq;
    struct amdgpu_irq_src rcv_irq;
    struct delayed_work flr_work;
    const struct amdgpu_virt_ops *ops;
};

#define AMDGPU_CSA_SIZE (8 * 1024)
#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)

#define amdgpu_sriov_enabled(adev) \
    ((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
    ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)

#define amdgpu_sriov_vf(adev) \
    ((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
    ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)

#define amdgpu_sriov_bios(adev) \
    ((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
    ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)

#define amdgpu_sriov_runtime(adev) \
    ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)

#define amdgpu_passthrough(adev) \
    ((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
    ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

static inline bool is_virtual_machine(void)
{

@ -54,4 +80,14 @@ static inline bool is_virtual_machine(void)
#endif
}

#endif
struct amdgpu_vm;
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);

#endif
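is_virtual_machine() keys off the CPU's hypervisor-present feature bit. A user-space analogue of the same check using the compiler's cpuid wrapper: on x86, CPUID leaf 1 reports hypervisor presence in ECX bit 31.

    #include <stdio.h>
    #include <cpuid.h>  /* GCC/Clang wrapper, x86 only */

    /* analogue of the kernel's X86_FEATURE_HYPERVISOR test */
    static int running_under_hypervisor(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        return !!(ecx & (1u << 31));
    }

    int main(void)
    {
        printf("hypervisor: %s\n", running_under_hypervisor() ? "yes" : "no");
        return 0;
    }
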
@ -1293,7 +1293,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t saddr, uint64_t offset,
                     uint64_t size, uint32_t flags)
                     uint64_t size, uint64_t flags)
{
    struct amdgpu_bo_va_mapping *mapping;
    struct amdgpu_vm *vm = bo_va->vm;

@ -111,6 +111,8 @@ struct amdgpu_vm {

    /* client id */
    u64 client_id;
    /* each VM will map on CSA */
    struct amdgpu_bo_va *csa_bo_va;
};

struct amdgpu_vm_id {

@ -195,7 +197,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint32_t flags);
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);

@ -181,9 +181,6 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
    if (!amdgpu_encoder->enc_priv)
        return;

    if (!adev->is_atom_bios)
        return;

    if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
        return;

@ -236,9 +233,6 @@ amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
    if (!amdgpu_encoder->enc_priv)
        return;

    if (!adev->is_atom_bios)
        return;

    if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
        return;

@ -889,7 +889,16 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)

    pi->uvd_power_gated = gate;

    ci_update_uvd_dpm(adev, gate);
    if (gate) {
        /* stop the UVD block */
        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                     AMD_PG_STATE_GATE);
        ci_update_uvd_dpm(adev, gate);
    } else {
        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                     AMD_PG_STATE_UNGATE);
        ci_update_uvd_dpm(adev, gate);
    }
}

static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)

@ -4336,13 +4345,13 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,

static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
                                          enum amdgpu_dpm_forced_level level)
                                          enum amd_dpm_forced_level level)
{
    struct ci_power_info *pi = ci_get_pi(adev);
    u32 tmp, levels, i;
    int ret;

    if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
    if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
        if ((!pi->pcie_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
            levels = 0;

@ -4403,7 +4412,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
            }
        }
    }
    } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
    } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
        if ((!pi->sclk_dpm_key_disabled) &&
            pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
            levels = ci_get_lowest_enabled_level(adev,

@ -4452,7 +4461,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
                udelay(1);
            }
        }
    } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
    } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
        if (!pi->pcie_dpm_key_disabled) {
            PPSMC_Result smc_result;

@ -6262,7 +6271,7 @@ static int ci_dpm_sw_init(void *handle)
    /* default to balanced state */
    adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
    adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
    adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
    adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
    adev->pm.default_sclk = adev->clock.default_sclk;
    adev->pm.default_mclk = adev->clock.default_mclk;
    adev->pm.current_sclk = adev->clock.default_sclk;

@ -6571,8 +6580,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
{
    struct ci_power_info *pi = ci_get_pi(adev);

    if (adev->pm.dpm.forced_level
        != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
    if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
                                     AMD_DPM_FORCED_LEVEL_LOW |
                                     AMD_DPM_FORCED_LEVEL_HIGH))
        return -EINVAL;

    switch (type) {

@ -6739,12 +6749,3 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
    adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
    adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version ci_dpm_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_SMC,
    .major = 7,
    .minor = 0,
    .rev = 0,
    .funcs = &ci_dpm_ip_funcs,
};
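The rewritten guard in ci_dpm_force_clock_level shows why the forced-level enum became a set of bit flags: membership in {auto, low, high} is one AND instead of a chain of equality tests, and the profile_* modes pass through implicitly. A sketch with illustrative flag values, not the driver's:

    #include <stdio.h>

    /* illustrative bit-flag levels, mirroring the amd_dpm_forced_level style */
    enum level {
        LEVEL_AUTO   = 1 << 0,
        LEVEL_LOW    = 1 << 1,
        LEVEL_HIGH   = 1 << 2,
        LEVEL_MANUAL = 1 << 3,
    };

    static int force_clock_level_allowed(enum level cur)
    {
        /* manual (and profile) modes may override clocks; auto/low/high may not */
        return !(cur & (LEVEL_AUTO | LEVEL_LOW | LEVEL_HIGH));
    }

    int main(void)
    {
        printf("%d %d\n", force_clock_level_allowed(LEVEL_AUTO),
               force_clock_level_allowed(LEVEL_MANUAL));  /* 0 1 */
        return 0;
    }
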
@ -1627,14 +1627,13 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
{
    if (is_virtual_machine()) /* passthrough mode */
        adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static const struct amdgpu_asic_funcs cik_asic_funcs =
{
    .read_disabled_bios = &cik_read_disabled_bios,
    .read_bios_from_rom = &cik_read_bios_from_rom,
    .detect_hw_virtualization = cik_detect_hw_virtualization,
    .read_register = &cik_read_register,
    .reset = &cik_asic_reset,
    .set_vga_state = &cik_vga_set_state,

@ -1890,6 +1889,8 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =

int cik_set_ip_blocks(struct amdgpu_device *adev)
{
    cik_detect_hw_virtualization(adev);

    switch (adev->asic_type) {
    case CHIP_BONAIRE:
        amdgpu_ip_block_add(adev, &cik_common_ip_block);

@ -651,7 +651,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
    ib.ptr[3] = 1;
    ib.ptr[4] = 0xDEADBEEF;
    ib.length_dw = 5;
    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
    if (r)
        goto err1;

File diff suppressed because it is too large
@ -1,239 +0,0 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __CZ_DPM_H__
#define __CZ_DPM_H__

#include "smu8_fusion.h"

#define CZ_AT_DFLT 30
#define CZ_NUM_NBPSTATES 4
#define CZ_NUM_NBPMEMORY_CLOCK 2
#define CZ_MAX_HARDWARE_POWERLEVELS 8
#define CZ_MAX_DISPLAY_CLOCK_LEVEL 8
#define CZ_MAX_DISPLAYPHY_IDS 10

#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102

#define SMC_RAM_END 0x40000

#define DPMFlags_SCLK_Enabled 0x00000001
#define DPMFlags_UVD_Enabled 0x00000002
#define DPMFlags_VCE_Enabled 0x00000004
#define DPMFlags_ACP_Enabled 0x00000008
#define DPMFlags_ForceHighestValid 0x40000000
#define DPMFlags_Debug 0x80000000

/* Do not change the following, it is also defined in SMU8.h */
#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001
#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000
#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000
#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000

/* temporary solution to SetMinDeepSleepSclk
 * should indicate by display adaptor
 * 10k Hz unit*/
#define CZ_MIN_DEEP_SLEEP_SCLK 800

enum cz_pt_config_reg_type {
    CZ_CONFIGREG_MMR = 0,
    CZ_CONFIGREG_SMC_IND,
    CZ_CONFIGREG_DIDT_IND,
    CZ_CONFIGREG_CACHE,
    CZ_CONFIGREG_MAX
};

struct cz_pt_config_reg {
    uint32_t offset;
    uint32_t mask;
    uint32_t shift;
    uint32_t value;
    enum cz_pt_config_reg_type type;
};

struct cz_dpm_entry {
    uint32_t soft_min_clk;
    uint32_t hard_min_clk;
    uint32_t soft_max_clk;
    uint32_t hard_max_clk;
};

struct cz_pl {
    uint32_t sclk;
    uint8_t vddc_index;
    uint8_t ds_divider_index;
    uint8_t ss_divider_index;
    uint8_t allow_gnb_slow;
    uint8_t force_nbp_state;
    uint8_t display_wm;
    uint8_t vce_wm;
};

struct cz_ps {
    struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS];
    uint32_t num_levels;
    bool need_dfs_bypass;
    uint8_t dpm0_pg_nb_ps_lo;
    uint8_t dpm0_pg_nb_ps_hi;
    uint8_t dpmx_nb_ps_lo;
    uint8_t dpmx_nb_ps_hi;
    bool force_high;
};

struct cz_displayphy_entry {
    uint8_t phy_present;
    uint8_t active_lane_mapping;
    uint8_t display_conf_type;
    uint8_t num_active_lanes;
};

struct cz_displayphy_info {
    bool phy_access_initialized;
    struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS];
};

struct cz_sys_info {
    uint32_t bootup_uma_clk;
    uint32_t bootup_sclk;
    uint32_t dentist_vco_freq;
    uint32_t nb_dpm_enable;
    uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK];
    uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
    uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES];
    uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL];
    uint16_t bootup_nb_voltage_index;
    uint8_t htc_tmp_lmt;
    uint8_t htc_hyst_lmt;
    uint32_t uma_channel_number;
};

struct cz_power_info {
    uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS];
    struct cz_sys_info sys_info;
    struct cz_pl boot_pl;
    bool disable_nb_ps3_in_battery;
    bool battery_state;
    uint32_t lowest_valid;
    uint32_t highest_valid;
    uint16_t high_voltage_threshold;
    /* smc offsets */
    uint32_t sram_end;
    uint32_t dpm_table_start;
    uint32_t soft_regs_start;
    /* dpm SMU tables */
    uint8_t uvd_level_count;
    uint8_t vce_level_count;
    uint8_t acp_level_count;
    uint32_t fps_high_threshold;
    uint32_t fps_low_threshold;
    /* dpm table */
    uint32_t dpm_flags;
    struct cz_dpm_entry sclk_dpm;
    struct cz_dpm_entry uvd_dpm;
    struct cz_dpm_entry vce_dpm;
    struct cz_dpm_entry acp_dpm;

    uint8_t uvd_boot_level;
    uint8_t uvd_interval;
    uint8_t vce_boot_level;
    uint8_t vce_interval;
    uint8_t acp_boot_level;
    uint8_t acp_interval;

    uint8_t graphics_boot_level;
    uint8_t graphics_interval;
    uint8_t graphics_therm_throttle_enable;
    uint8_t graphics_voltage_change_enable;
    uint8_t graphics_clk_slow_enable;
    uint8_t graphics_clk_slow_divider;

    uint32_t low_sclk_interrupt_threshold;
    bool uvd_power_gated;
    bool vce_power_gated;
    bool acp_power_gated;

    uint32_t active_process_mask;

    uint32_t mgcg_cgtt_local0;
    uint32_t mgcg_cgtt_local1;
    uint32_t clock_slow_down_step;
    uint32_t skip_clock_slow_down;
    bool enable_nb_ps_policy;
    uint32_t voting_clients;
    uint32_t voltage_drop_threshold;
    uint32_t gfx_pg_threshold;
    uint32_t max_sclk_level;
    uint32_t max_uvd_level;
    uint32_t max_vce_level;
    /* flags */
    bool didt_enabled;
    bool video_start;
    bool cac_enabled;
    bool bapm_enabled;
    bool nb_dpm_enabled_by_driver;
    bool nb_dpm_enabled;
    bool auto_thermal_throttling_enabled;
    bool dpm_enabled;
    bool need_pptable_upload;
    /* caps */
    bool caps_cac;
    bool caps_power_containment;
    bool caps_sq_ramping;
    bool caps_db_ramping;
    bool caps_td_ramping;
    bool caps_tcp_ramping;
    bool caps_sclk_throttle_low_notification;
    bool caps_fps;
    bool caps_uvd_dpm;
    bool caps_uvd_pg;
    bool caps_vce_dpm;
    bool caps_vce_pg;
    bool caps_acp_dpm;
    bool caps_acp_pg;
    bool caps_stable_power_state;
    bool caps_enable_dfs_bypass;
    bool caps_sclk_ds;
    bool caps_voltage_island;
    /* power state */
    struct amdgpu_ps current_rps;
    struct cz_ps current_ps;
    struct amdgpu_ps requested_rps;
    struct cz_ps requested_ps;

    bool uvd_power_down;
    bool vce_power_down;
    bool acp_power_down;

    bool uvd_dynamic_pg;
};

/* cz_smc.c */
uint32_t cz_get_argument(struct amdgpu_device *adev);
int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg);
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                      uint16_t msg, uint32_t parameter);
int cz_read_smc_sram_dword(struct amdgpu_device *adev,
                           uint32_t smc_address, uint32_t *value, uint32_t limit);
int cz_smu_upload_pptable(struct amdgpu_device *adev);
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table);
#endif

@ -1,995 +0,0 @@
|
|||
/*
|
||||
* Copyright 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include "drmP.h"
|
||||
#include "amdgpu.h"
|
||||
#include "smu8.h"
|
||||
#include "smu8_fusion.h"
|
||||
#include "cz_ppsmc.h"
|
||||
#include "cz_smumgr.h"
|
||||
#include "smu_ucode_xfer_cz.h"
|
||||
#include "amdgpu_ucode.h"
|
||||
#include "cz_dpm.h"
|
||||
#include "vi_dpm.h"
|
||||
|
||||
#include "smu/smu_8_0_d.h"
|
||||
#include "smu/smu_8_0_sh_mask.h"
|
||||
#include "gca/gfx_8_0_d.h"
|
||||
#include "gca/gfx_8_0_sh_mask.h"
|
||||
|
||||
uint32_t cz_get_argument(struct amdgpu_device *adev)
|
||||
{
|
||||
return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
|
||||
}
|
||||
|
||||
static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
|
||||
{
|
||||
struct cz_smu_private_data *priv =
|
||||
(struct cz_smu_private_data *)(adev->smu.priv);
|
||||
|
||||
return priv;
|
||||
}
|
||||
|
||||
static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
|
||||
{
|
||||
int i;
|
||||
u32 content = 0, tmp;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
|
||||
SMU_MP1_SRBM2P_RESP_0, CONTENT);
|
||||
if (content != tmp)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* timeout means wrong logic*/
|
||||
if (i == adev->usec_timeout)
|
||||
return -EINVAL;
|
||||
|
||||
WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
|
||||
WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
|
||||
{
|
||||
int i;
|
||||
u32 content = 0, tmp = 0;
|
||||
|
||||
if (cz_send_msg_to_smc_async(adev, msg))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
|
||||
SMU_MP1_SRBM2P_RESP_0, CONTENT);
|
||||
if (content != tmp)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* timeout means wrong logic*/
|
||||
if (i == adev->usec_timeout)
|
||||
return -EINVAL;
|
||||
|
||||
if (PPSMC_Result_OK != tmp) {
|
||||
dev_err(adev->dev, "SMC Failed to send Message.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
|
||||
u16 msg, u32 parameter)
|
||||
{
|
||||
WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
|
||||
return cz_send_msg_to_smc(adev, msg);
|
||||
}
|
||||
|
||||
static int cz_set_smc_sram_address(struct amdgpu_device *adev,
|
||||
u32 smc_address, u32 limit)
|
||||
{
|
||||
if (smc_address & 3)
|
||||
return -EINVAL;
|
||||
if ((smc_address + 3) > limit)
|
||||
return -EINVAL;
|
||||
|
||||
WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
|
||||
u32 *value, u32 limit)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cz_set_smc_sram_address(adev, smc_address, limit);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*value = RREG32(mmMP0PUB_IND_DATA_0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
|
||||
u32 value, u32 limit)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cz_set_smc_sram_address(adev, smc_address, limit);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
WREG32(mmMP0PUB_IND_DATA_0, value);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
|
||||
{
|
||||
struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
|
||||
|
||||
uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
|
||||
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
|
||||
|
||||
cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);
|
||||
|
||||
/*prepare toc buffers*/
|
||||
cz_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_DriverDramAddrHi,
|
||||
priv->toc_buffer.mc_addr_high);
|
||||
cz_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_DriverDramAddrLo,
|
||||
priv->toc_buffer.mc_addr_low);
|
||||
cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);
|
||||
|
||||
/*execute jobs*/
|
||||
cz_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_ExecuteJob,
|
||||
priv->toc_entry_aram);
|
||||
|
||||
cz_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_ExecuteJob,
|
||||
priv->toc_entry_power_profiling_index);
|
||||
|
||||
cz_send_msg_to_smc_with_parameter(adev,
|
||||
PPSMC_MSG_ExecuteJob,
|
||||
priv->toc_entry_initialize_index);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
*Check if the FW has been loaded, SMU will not return if loading
|
||||
*has not finished.
|
||||
*/
|
||||
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
|
||||
uint32_t fw_mask)
|
||||
{
|
||||
int i;
|
||||
uint32_t index = SMN_MP1_SRAM_START_ADDR +
|
||||
SMU8_FIRMWARE_HEADER_LOCATION +
|
||||
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
|
||||
|
||||
WREG32(mmMP0PUB_IND_INDEX, index);
|
||||
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
if (i >= adev->usec_timeout) {
|
||||
dev_err(adev->dev,
|
||||
"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
|
||||
fw_mask, RREG32(mmMP0PUB_IND_DATA));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* interfaces for different ip blocks to check firmware loading status
|
||||
* 0 for success otherwise failed
|
||||
*/
|
||||
static int cz_smu_check_finished(struct amdgpu_device *adev,
|
||||
enum AMDGPU_UCODE_ID id)
|
||||
{
|
||||
switch (id) {
|
||||
case AMDGPU_UCODE_ID_SDMA0:
|
||||
if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_SDMA1:
|
||||
if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_CP_CE:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_CP_PFP:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
|
||||
return 0;
|
||||
case AMDGPU_UCODE_ID_CP_ME:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_CP_MEC1:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_CP_MEC2:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_RLC_G:
|
||||
if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
|
||||
return 0;
|
||||
break;
|
||||
case AMDGPU_UCODE_ID_MAXIMUM:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}

static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}

int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}
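
/*
 * Sketch of the expected lifecycle of this interface, derived from the
 * functions in this file (call sites may differ):
 *
 *	cz_smu_init(adev);	allocate TOC/SMU buffers, build the TOC
 *	cz_smu_start(adev);	ask the SMU to load fw, poll UcodeLoadStatus
 *	...			per-block checks via cz_smu_check_finished()
 *	cz_smu_fini(adev);	unpin and free the buffers
 */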

static uint32_t cz_convert_fw_type(uint32_t fw_type)
{
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("UCode type is out of range!");
	}

	return result;
}

static uint8_t cz_smu_translate_firmware_enum_to_arg(
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}

static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
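
/*
 * Worked example for the MEC jump-table case above: jt_offset and jt_size
 * are stored in dwords in the gfx firmware header, so both are shifted
 * left by 2 to convert to bytes. E.g. jt_offset = 0x100 dwords places the
 * jump table at mc_addr + 0x400 bytes. (Values illustrative only.)
 */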

static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
				enum cz_scratch_entry scratch_type,
				uint32_t size_in_byte,
				struct cz_buffer_entry *entry)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
						priv->smu_buffer.mc_addr_low;
	mc_addr += size_in_byte;

	priv->smu_buffer_used_bytes += size_in_byte;
	entry->data_size = size_in_byte;
	entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
	entry->mc_addr_low = lower_32_bits(mc_addr);
	entry->mc_addr_high = upper_32_bits(mc_addr);
	entry->firmware_ID = scratch_type;

	return 0;
}

static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}

static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}

static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	priv->toc_entry_aram = priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_SAVE, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			TASK_TYPE_UCODE_LOAD, true);

	return 0;
}

static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			TASK_TYPE_INITIALIZE, true);
	return 0;
}

static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}

static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_clock_table = priv->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			TASK_TYPE_INITIALIZE, true);

	return 0;
}

static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
{
	int i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}
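
/*
 * The TOC built by the helpers above is a table in GPU-visible memory:
 * toc->JobList[] maps job ids (JOB_GFX_SAVE, JOB_GFX_RESTORE, ...) to the
 * index of their first SMU_Task, and each task chains to the next through
 * task->next until END_OF_TASK_LIST. A sketch of walking one job
 * (illustrative only, exact field widths may differ):
 *
 *	uint8_t idx = toc->JobList[JOB_GFX_SAVE];
 *	while (idx != END_OF_TASK_LIST) {
 *		struct SMU_Task *t = &toc->tasks[idx];
 *		(* t->type, t->arg, t->addr, t->size_bytes describe one step *)
 *		idx = t->next;
 *	}
 */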

/*
 * cz smu uninitialization
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}

int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}

int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}

/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,
	.request_smu_specific_fw = NULL,
};

/*
 * cz smu initialization
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	priv->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	 * 1. create amdgpu_bo for toc buffer and smu buffer
	 * 2. pin mc address
	 * 3. map kernel virtual address
	 */
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       toc_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
			       smu_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
			UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
			UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
			sizeof(struct SMU8_MultimediaPowerLogData),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
			CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
			sizeof(struct SMU8_Fusion_ClkTable),
			&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);
	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}

@@ -1,94 +0,0 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __CZ_SMC_H__
#define __CZ_SMC_H__

#define MAX_NUM_FIRMWARE			8
#define MAX_NUM_SCRATCH				11
#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING	1024
#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING	2048
#define CZ_SCRATCH_SIZE_SDMA_METADATA		1024
#define CZ_SCRATCH_SIZE_IH			((2*256+1)*4)

enum cz_scratch_entry {
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0,
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM,
	CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT,
	CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS,
	CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT,
	CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START,
	CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS,
	CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
};

struct cz_buffer_entry {
	uint32_t data_size;
	uint32_t mc_addr_low;
	uint32_t mc_addr_high;
	void *kaddr;
	enum cz_scratch_entry firmware_ID;
};

struct cz_register_index_data_pair {
	uint32_t offset;
	uint32_t value;
};

struct cz_ih_meta_data {
	uint32_t command;
	struct cz_register_index_data_pair register_index_value_pair[1];
};

struct cz_smu_private_data {
	uint8_t driver_buffer_length;
	uint8_t scratch_buffer_length;
	uint16_t toc_entry_used_count;
	uint16_t toc_entry_initialize_index;
	uint16_t toc_entry_power_profiling_index;
	uint16_t toc_entry_aram;
	uint16_t toc_entry_ih_register_restore_task_index;
	uint16_t toc_entry_clock_table;
	uint16_t ih_register_restore_task_size;
	uint16_t smu_buffer_used_bytes;

	struct cz_buffer_entry toc_buffer;
	struct cz_buffer_entry smu_buffer;
	struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE];
	struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH];
};

#endif
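
/*
 * Note on cz_buffer_entry: the 64-bit GPU (MC) address is carried as a
 * split high/low pair. Recombining it is the usual shift-or (sketch):
 *
 *	uint64_t mc_addr = ((uint64_t)entry->mc_addr_high << 32) |
 *			   entry->mc_addr_low;
 */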

@@ -25,7 +25,7 @@
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "si/clearstate_si.h"
#include "clearstate_si.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"

@@ -1794,14 +1794,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)

static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		adev->gfx.scratch.free[i] = true;
		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
	}
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
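
/*
 * This hunk replaces the free[] bool array with a single free_mask bitmap.
 * A scratch-register allocation against the new mask could look like the
 * sketch below; the helper body is illustrative, not part of this hunk.
 *
 *	int gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
 *	{
 *		int i = ffs(adev->gfx.scratch.free_mask);
 *
 *		if (i != 0 && i <= adev->gfx.scratch.num_reg) {
 *			i--;
 *			adev->gfx.scratch.free_mask &= ~(1u << i);
 *			*reg = adev->gfx.scratch.reg_base + i;
 *			return 0;
 *		}
 *		return -EINVAL;
 *	}
 */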

static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)

@@ -1975,7 +1970,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

@@ -2003,14 +2003,9 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 */
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
		adev->gfx.scratch.free[i] = true;
		adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
	}
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

/**

@@ -2321,7 +2316,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

File diff suppressed because it is too large

@@ -375,9 +375,16 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;
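
/*
 * On APUs the carve-out sits in system memory, so the aperture base is
 * taken from MC_VM_FB_OFFSET instead of the PCI BAR; the << 22 converts
 * the register's 4 MB granularity into a byte address (e.g. a register
 * value of 0x3c0 would give a base of 0xF0000000).
 */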

@@ -467,9 +467,16 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

@@ -1439,6 +1446,21 @@ static int gmc_v8_0_set_powergating_state(void *handle,
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

@@ -1457,6 +1479,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {

@@ -1230,6 +1230,7 @@ static void kv_update_current_ps(struct amdgpu_device *adev,
	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,

@@ -1241,6 +1242,7 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)

@@ -1904,19 +1906,19 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
}

static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
					enum amdgpu_dpm_forced_level level)
					enum amd_dpm_forced_level level)
{
	int ret;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;

@@ -3009,7 +3011,6 @@ static int kv_dpm_late_init(void *handle)
	kv_dpm_powergate_samu(adev, true);
	kv_dpm_powergate_vce(adev, true);
	kv_dpm_powergate_uvd(adev, true);

	return 0;
}

@@ -3029,7 +3030,7 @@ static int kv_dpm_sw_init(void *handle)
	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;

@@ -3078,6 +3079,9 @@ static int kv_dpm_hw_init(void *handle)
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);

@@ -3245,15 +3249,52 @@ static int kv_dpm_set_powergating_state(void *handle,
	return 0;
}

static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
					     const struct kv_pl *kv_cpl2)
{
	return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
		(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
		(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
		(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}

static int kv_check_state_equal(struct amdgpu_device *adev,
				struct amdgpu_ps *cps,
				struct amdgpu_ps *rps,
				bool *equal)
{
	if (equal == NULL)
	struct kv_ps *kv_cps;
	struct kv_ps *kv_rps;
	int i;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	*equal = false;
	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	if (kv_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (kv_cps->num_levels != kv_rps->num_levels) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < kv_cps->num_levels; i++) {
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					&(kv_rps->levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

@@ -3307,12 +3348,3 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version kv_dpm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};

@@ -0,0 +1,592 @@
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Xiangliang.Yu@amd.com
 */

#include "amdgpu.h"
#include "vi.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "vid.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v3_0.h"
#include "tonga_ih.h"
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "mxgpu_vi.h"

/* VI golden setting */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};

static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};

static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};

void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 xgpu_fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(
						 xgpu_fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 xgpu_fiji_golden_settings_a10,
						 (const u32)ARRAY_SIZE(
						 xgpu_fiji_golden_settings_a10));
		amdgpu_program_register_sequence(adev,
						 xgpu_fiji_golden_common_all,
						 (const u32)ARRAY_SIZE(
						 xgpu_fiji_golden_common_all));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 xgpu_tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(
						 xgpu_tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 xgpu_tonga_golden_settings_a11,
						 (const u32)ARRAY_SIZE(
						 xgpu_tonga_golden_settings_a11));
		amdgpu_program_register_sequence(adev,
						 xgpu_tonga_golden_common_all,
						 (const u32)ARRAY_SIZE(
						 xgpu_tonga_golden_common_all));
		break;
	default:
		BUG_ON("Doesn't support chip type.\n");
		break;
	}
}
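
/*
 * The golden-register arrays above are consumed three u32s at a time:
 * { offset, and_mask, or_value }. From what the existing helper does,
 * amdgpu_program_register_sequence() applies roughly the following
 * read-modify-write per triple (simplified sketch, not a new
 * implementation):
 *
 *	if (and_mask == 0xffffffff)
 *		tmp = or_mask;
 *	else
 *		tmp = (RREG32(reg) & ~and_mask) | or_mask;
 *	WREG32(reg, tmp);
 */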

/*
 * Mailbox communication between GPU hypervisor and VFs
 */
static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;

	reg = RREG32(mmMAILBOX_CONTROL);
	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32(mmMAILBOX_CONTROL, reg);
}

static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32(mmMAILBOX_CONTROL);
	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32(mmMAILBOX_CONTROL, reg);
}

static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_event event)
{
	u32 reg;

	reg = RREG32(mmMAILBOX_MSGBUF_TRN_DW0);
	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, event);
	WREG32(mmMAILBOX_MSGBUF_TRN_DW0, reg);

	xgpu_vi_mailbox_set_valid(adev, true);
}

static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	/* send ack to PF */
	xgpu_vi_mailbox_send_ack(adev);

	return 0;
}

static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32(mmMAILBOX_CONTROL);
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		msleep(1);
		timeout -= 1;

		reg = RREG32(mmMAILBOX_CONTROL);
	}

	return r;
}

static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;

	r = xgpu_vi_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Doesn't get ack from pf.\n");
			r = -ETIME;
			break;
		}
		msleep(1);
		timeout -= 1;

		r = xgpu_vi_mailbox_rcv_msg(adev, event);
	}

	return r;
}

static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
					enum idh_request request)
{
	int r;

	xgpu_vi_mailbox_trans_msg(adev, request);

	/* start to poll ack */
	r = xgpu_vi_poll_ack(adev);
	if (r)
		return r;

	xgpu_vi_mailbox_set_valid(adev, false);

	/* start to check msg if request is idh_req_gpu_init_access */
	if (request == IDH_REQ_GPU_INIT_ACCESS) {
		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r)
			return r;
	}

	return 0;
}
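
/*
 * Putting the pieces together, the VF-to-PF handshake implemented above is:
 *
 *	1. xgpu_vi_mailbox_trans_msg()       write request, raise TRN_MSG_VALID
 *	2. xgpu_vi_poll_ack()                wait for TRN_MSG_ACK from the PF
 *	3. xgpu_vi_mailbox_set_valid(false)  drop the valid bit
 *	4. for init access only: xgpu_vi_poll_msg(IDH_READY_TO_ACCESS_GPU)
 *
 * Both polls sleep 1 ms per iteration and give up after roughly
 * VI_MAILBOX_TIMEDOUT iterations, returning -ETIME.
 */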

static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_event event;

	event = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_vi_send_access_requests(adev, event);
}

static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_event event;
	int r = 0;

	event = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_vi_send_access_requests(adev, event);

	return r;
}

/* mailbox interrupt support */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work,
					struct amdgpu_virt, flr_work.work);
	struct amdgpu_device *adev = container_of(virt,
					struct amdgpu_device, virt);
	int r = 0;

	r = xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
	if (r)
		DRM_ERROR("failed to get flr cmpl msg from hypervisor.\n");

	/* TODO: need to restore gfx states */
}

static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32(mmMAILBOX_INT_CNTL);

	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
	/* do nothing for other msg */
	if (r)
		return 0;

	/* TODO: need to save gfx states */
	schedule_delayed_work(&adev->virt.flr_work,
			      msecs_to_jiffies(VI_MAILBOX_RESET_TIME));

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};

void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
|
||||
}
|
||||
|
||||
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
|
||||
if (r) {
|
||||
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
|
||||
if (r)
|
||||
return r;
|
||||
r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
|
||||
if (r) {
|
||||
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
|
||||
return r;
|
||||
}
|
||||
|
||||
INIT_DELAYED_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
|
||||
{
|
||||
cancel_delayed_work_sync(&adev->virt.flr_work);
|
||||
amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
|
||||
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
|
||||
}
|
||||
|
||||
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
|
||||
.req_full_gpu = xgpu_vi_request_full_gpu_access,
|
||||
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
|
||||
.reset_gpu = xgpu_vi_request_reset,
|
||||
};
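
The handshake above is a simple valid/ack protocol: the VF writes a request into MAILBOX_MSGBUF_TRN_DW0, raises TRN_MSG_VALID, waits for the PF to raise TRN_MSG_ACK, then drops the valid bit; for IDH_REQ_GPU_INIT_ACCESS it additionally polls for IDH_READY_TO_ACCESS_GPU. A minimal sketch of a caller, built only on the ops table above (the function name and the surrounding init flow are illustrative, not part of this diff):

static int example_vf_init_access(struct amdgpu_device *adev)
{
	int r;

	/* IDH_REQ_GPU_INIT_ACCESS: trans_msg + poll TRN_MSG_ACK,
	 * then poll for IDH_READY_TO_ACCESS_GPU from the PF */
	r = xgpu_vi_virt_ops.req_full_gpu(adev, true);
	if (r)
		return r;	/* -ETIME if the PF never answers */

	/* ... VF init that needs exclusive hardware access goes here ... */

	/* IDH_REL_GPU_INIT_ACCESS: hand exclusive access back to the PF */
	return xgpu_vi_virt_ops.rel_full_gpu(adev, true);
}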

@ -0,0 +1,55 @@
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __MXGPU_VI_H__
#define __MXGPU_VI_H__

#define VI_MAILBOX_TIMEDOUT	150
#define VI_MAILBOX_RESET_TIME	12

/* VI mailbox messages request */
enum idh_request {
	IDH_REQ_GPU_INIT_ACCESS = 1,
	IDH_REL_GPU_INIT_ACCESS,
	IDH_REQ_GPU_FINI_ACCESS,
	IDH_REL_GPU_FINI_ACCESS,
	IDH_REQ_GPU_RESET_ACCESS
};

/* VI mailbox messages data */
enum idh_event {
	IDH_CLR_MSG_BUF = 0,
	IDH_READY_TO_ACCESS_GPU,
	IDH_FLR_NOTIFICATION,
	IDH_FLR_NOTIFICATION_CMPL,
	IDH_EVENT_MAX
};

extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;

void xgpu_vi_init_golden_registers(struct amdgpu_device *adev);
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev);
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev);

#endif
@ -701,7 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;
@ -910,7 +910,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

@ -1533,6 +1533,22 @@ static int sdma_v3_0_set_powergating_state(void *handle,
	return 0;
}

static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,

@ -1551,6 +1567,7 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
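
The new callback reports state by ORing feature bits into a caller-supplied word rather than returning a value, so several IP blocks can share one accumulator. A small sketch under that assumption, calling the sdma_v3_0 hook above directly (the wrapper name is illustrative):

static u32 example_collect_cg_flags(struct amdgpu_device *adev)
{
	u32 flags = 0;

	/* each get_clockgating_state() implementation only sets bits,
	 * so the same word can be passed through every IP block */
	sdma_v3_0_get_clockgating_state(adev, &flags);

	return flags;	/* AMD_CG_SUPPORT_SDMA_MGCG and/or _LS if active */
}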
File diff suppressed because it is too large
@ -24,7 +24,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si/sid.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{

@ -301,7 +301,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;
@ -26,7 +26,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
#include "si/sid.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
#include "atom.h"

@ -3009,29 +3009,6 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
	return ret;
}

struct si_dpm_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 max_sclk;
	u32 max_mclk;
};

/* cards with dpm stability problems */
static struct si_dpm_quirk si_dpm_quirk_list[] = {
	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
	{ 0, 0, 0, 0 },
};

static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
						   u16 vce_voltage)
{

@ -3477,18 +3454,8 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	u32 max_sclk = 0, max_mclk = 0;
	int i;
	struct si_dpm_quirk *p = si_dpm_quirk_list;

	/* limit all SI kickers */
	if (adev->asic_type == CHIP_PITCAIRN) {
		if ((adev->pdev->revision == 0x81) ||
		    (adev->pdev->device == 0x6810) ||
		    (adev->pdev->device == 0x6811) ||
		    (adev->pdev->device == 0x6816) ||
		    (adev->pdev->device == 0x6817) ||
		    (adev->pdev->device == 0x6806))
			max_mclk = 120000;
	} else if (adev->asic_type == CHIP_HAINAN) {
	if (adev->asic_type == CHIP_HAINAN) {
		if ((adev->pdev->revision == 0x81) ||
		    (adev->pdev->revision == 0x83) ||
		    (adev->pdev->revision == 0xC3) ||

@ -3498,18 +3465,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
			max_sclk = 75000;
		}
	}
	/* Apply dpm quirks */
	while (p && p->chip_device != 0) {
		if (adev->pdev->vendor == p->chip_vendor &&
		    adev->pdev->device == p->chip_device &&
		    adev->pdev->subsystem_vendor == p->subsys_vendor &&
		    adev->pdev->subsystem_device == p->subsys_device) {
			max_sclk = p->max_sclk;
			max_mclk = p->max_mclk;
			break;
		}
		++p;
	}

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;

@ -3906,25 +3861,25 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
}

static int si_dpm_force_performance_level(struct amdgpu_device *adev,
				enum amdgpu_dpm_forced_level level)
				enum amd_dpm_forced_level level)
{
	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
	struct si_ps *ps = si_get_ps(rps);
	u32 levels = ps->performance_level_count;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

@ -7746,7 +7701,7 @@ static int si_dpm_sw_init(void *handle)
	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;

@ -8072,11 +8027,3 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}

const struct amdgpu_ip_block_version si_dpm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dpm_ip_funcs,
};
@ -23,7 +23,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "si/sid.h"
#include "sid.h"
#include "si_ih.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@ -25,7 +25,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "si/sid.h"
#include "sid.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
@ -1,101 +0,0 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef SMU_UCODE_XFER_VI_H
#define SMU_UCODE_XFER_VI_H

#define SMU_DRAMData_TOC_VERSION     1
#define MAX_IH_REGISTER_COUNT        65535
#define SMU_DIGEST_SIZE_BYTES        20
#define SMU_FB_SIZE_BYTES            1048576
#define SMU_MAX_ENTRIES              12

#define UCODE_ID_SMU                 0
#define UCODE_ID_SDMA0               1
#define UCODE_ID_SDMA1               2
#define UCODE_ID_CP_CE               3
#define UCODE_ID_CP_PFP              4
#define UCODE_ID_CP_ME               5
#define UCODE_ID_CP_MEC              6
#define UCODE_ID_CP_MEC_JT1          7
#define UCODE_ID_CP_MEC_JT2          8
#define UCODE_ID_GMCON_RENG          9
#define UCODE_ID_RLC_G               10
#define UCODE_ID_IH_REG_RESTORE      11
#define UCODE_ID_VBIOS               12
#define UCODE_ID_MISC_METADATA       13
#define UCODE_ID_SMU_SK              14
#define UCODE_ID_RLC_SCRATCH         32
#define UCODE_ID_RLC_SRM_ARAM        33
#define UCODE_ID_RLC_SRM_DRAM        34
#define UCODE_ID_MEC_STORAGE         35
#define UCODE_ID_VBIOS_PARAMETERS    36
#define UCODE_META_DATA              0xFF

#define UCODE_ID_SMU_MASK            0x00000001
#define UCODE_ID_SDMA0_MASK          0x00000002
#define UCODE_ID_SDMA1_MASK          0x00000004
#define UCODE_ID_CP_CE_MASK          0x00000008
#define UCODE_ID_CP_PFP_MASK         0x00000010
#define UCODE_ID_CP_ME_MASK          0x00000020
#define UCODE_ID_CP_MEC_MASK         0x00000040
#define UCODE_ID_CP_MEC_JT1_MASK     0x00000080
#define UCODE_ID_CP_MEC_JT2_MASK     0x00000100
#define UCODE_ID_GMCON_RENG_MASK     0x00000200
#define UCODE_ID_RLC_G_MASK          0x00000400
#define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800
#define UCODE_ID_VBIOS_MASK          0x00001000

#define UCODE_FLAG_UNHALT_MASK       0x1

struct SMU_Entry {
#ifndef __BIG_ENDIAN
	uint16_t id;
	uint16_t version;
	uint32_t image_addr_high;
	uint32_t image_addr_low;
	uint32_t meta_data_addr_high;
	uint32_t meta_data_addr_low;
	uint32_t data_size_byte;
	uint16_t flags;
	uint16_t num_register_entries;
#else
	uint16_t version;
	uint16_t id;
	uint32_t image_addr_high;
	uint32_t image_addr_low;
	uint32_t meta_data_addr_high;
	uint32_t meta_data_addr_low;
	uint32_t data_size_byte;
	uint16_t num_register_entries;
	uint16_t flags;
#endif
};

struct SMU_DRAMData_TOC {
	uint32_t structure_version;
	uint32_t num_entries;
	struct SMU_Entry entry[SMU_MAX_ENTRIES];
};

#endif
@ -822,16 +822,44 @@ static int uvd_v5_0_set_powergating_state(void *handle,
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
		adev->uvd.is_powergated = true;
	} else {
		return uvd_v5_0_start(adev);
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
		adev->uvd.is_powergated = false;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->uvd.is_powergated) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {

@ -849,6 +877,7 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
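
The gate/ungate flow above pairs with the new query: gating stops the block and latches is_powergated, and the query then refuses to read UVD registers. A short sketch of the intended ordering, assuming handle is the amdgpu_device as the casts above show (the caller name is illustrative):

static void example_uvd_gate_then_query(struct amdgpu_device *adev)
{
	u32 flags = 0;

	/* stops UVD and sets adev->uvd.is_powergated */
	uvd_v5_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

	/* logs via DRM_INFO and returns without touching mmUVD_CGC_CTRL
	 * while the block is powergated; flags stays 0 */
	uvd_v5_0_get_clockgating_state(adev, &flags);
}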
@ -1047,6 +1047,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

@ -1055,10 +1056,37 @@ static int uvd_v6_0_set_powergating_state(void *handle,

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
		adev->uvd.is_powergated = true;
	} else {
		return uvd_v6_0_start(adev);
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
		adev->uvd.is_powergated = false;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->uvd.is_powergated) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {

@ -1079,6 +1107,7 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
@ -230,6 +230,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
	struct amdgpu_ring *ring;
	int idx, r;

	vce_v3_0_override_vce_clock_gating(adev, true);
	if (!(adev->flags & AMD_IS_APU))
		amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);

@ -708,18 +712,6 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
	return 0;
}

static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{

@ -727,11 +719,6 @@ static int vce_v3_0_set_clockgating_state(void *handle,
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v3_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

@ -777,15 +764,46 @@ static int vce_v3_0_set_powergating_state(void *handle,
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
	if (state == AMD_PG_STATE_GATE) {
		adev->vce.is_powergated = true;
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
	} else {
		ret = vce_v3_0_start(adev);
		if (ret)
			goto out;
		adev->vce.is_powergated = false;
	}

out:
	return ret;
}

static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->vce.is_powergated) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,

@ -839,6 +857,7 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
@ -20,9 +20,7 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"

@ -78,17 +76,7 @@
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
#include "mxgpu_vi.h"

/*
 * Indirect registers accessor

@ -285,6 +273,12 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,

@ -458,14 +452,14 @@ static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

@ -801,7 +795,37 @@ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		 CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

@ -869,7 +893,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,

@ -905,6 +928,11 @@ static int vi_common_early_init(void *handle)
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {

@ -1061,10 +1089,6 @@ static int vi_common_early_init(void *handle)
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

@ -1073,8 +1097,23 @@ static int vi_common_early_init(void *handle)
	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

@ -1106,6 +1145,9 @@ static int vi_common_hw_fini(void *handle)
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

@ -1190,6 +1232,23 @@ static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
	WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{

@ -1350,6 +1409,8 @@ static int vi_common_set_clockgating_state(void *handle,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:

@ -1368,10 +1429,36 @@ static int vi_common_set_powergating_state(void *handle,
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,

@ -1383,6 +1470,7 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =

@ -1396,6 +1484,12 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */

@ -1413,28 +1507,32 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
@ -28,4 +28,116 @@ void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);

struct amdgpu_ce_ib_state
{
	uint32_t	ce_ib_completion_status;
	uint32_t	ce_constegnine_count;
	uint32_t	ce_ibOffset_ib1;
	uint32_t	ce_ibOffset_ib2;
}; /* Total of 4 DWORD */

struct amdgpu_de_ib_state
{
	uint32_t	ib_completion_status;
	uint32_t	de_constEngine_count;
	uint32_t	ib_offset_ib1;
	uint32_t	ib_offset_ib2;
	uint32_t	preamble_begin_ib1;
	uint32_t	preamble_begin_ib2;
	uint32_t	preamble_end_ib1;
	uint32_t	preamble_end_ib2;
	uint32_t	draw_indirect_baseLo;
	uint32_t	draw_indirect_baseHi;
	uint32_t	disp_indirect_baseLo;
	uint32_t	disp_indirect_baseHi;
	uint32_t	gds_backup_addrlo;
	uint32_t	gds_backup_addrhi;
	uint32_t	index_base_addrlo;
	uint32_t	index_base_addrhi;
	uint32_t	sample_cntl;
}; /* Total of 17 DWORD */

struct amdgpu_ce_ib_state_chained_ib
{
	/* section of non chained ib part */
	uint32_t	ce_ib_completion_status;
	uint32_t	ce_constegnine_count;
	uint32_t	ce_ibOffset_ib1;
	uint32_t	ce_ibOffset_ib2;

	/* section of chained ib */
	uint32_t	ce_chainib_addrlo_ib1;
	uint32_t	ce_chainib_addrlo_ib2;
	uint32_t	ce_chainib_addrhi_ib1;
	uint32_t	ce_chainib_addrhi_ib2;
	uint32_t	ce_chainib_size_ib1;
	uint32_t	ce_chainib_size_ib2;
}; /* total 10 DWORD */

struct amdgpu_de_ib_state_chained_ib
{
	/* section of non chained ib part */
	uint32_t	ib_completion_status;
	uint32_t	de_constEngine_count;
	uint32_t	ib_offset_ib1;
	uint32_t	ib_offset_ib2;

	/* section of chained ib */
	uint32_t	chain_ib_addrlo_ib1;
	uint32_t	chain_ib_addrlo_ib2;
	uint32_t	chain_ib_addrhi_ib1;
	uint32_t	chain_ib_addrhi_ib2;
	uint32_t	chain_ib_size_ib1;
	uint32_t	chain_ib_size_ib2;

	/* section of non chained ib part */
	uint32_t	preamble_begin_ib1;
	uint32_t	preamble_begin_ib2;
	uint32_t	preamble_end_ib1;
	uint32_t	preamble_end_ib2;

	/* section of chained ib */
	uint32_t	chain_ib_pream_addrlo_ib1;
	uint32_t	chain_ib_pream_addrlo_ib2;
	uint32_t	chain_ib_pream_addrhi_ib1;
	uint32_t	chain_ib_pream_addrhi_ib2;

	/* section of non chained ib part */
	uint32_t	draw_indirect_baseLo;
	uint32_t	draw_indirect_baseHi;
	uint32_t	disp_indirect_baseLo;
	uint32_t	disp_indirect_baseHi;
	uint32_t	gds_backup_addrlo;
	uint32_t	gds_backup_addrhi;
	uint32_t	index_base_addrlo;
	uint32_t	index_base_addrhi;
	uint32_t	sample_cntl;
}; /* Total of 27 DWORD */

struct amdgpu_gfx_meta_data
{
	/* 4 DWORD, address must be 4KB aligned */
	struct amdgpu_ce_ib_state	ce_payload;
	uint32_t			reserved1[60];
	/* 17 DWORD, address must be 64B aligned */
	struct amdgpu_de_ib_state	de_payload;
	/* PFP IB base address which get pre-empted */
	uint32_t			DeIbBaseAddrLo;
	uint32_t			DeIbBaseAddrHi;
	uint32_t			reserved2[941];
}; /* Total of 4K Bytes */

struct amdgpu_gfx_meta_data_chained_ib
{
	/* 10 DWORD, address must be 4KB aligned */
	struct amdgpu_ce_ib_state_chained_ib	ce_payload;
	uint32_t				reserved1[54];
	/* 27 DWORD, address must be 64B aligned */
	struct amdgpu_de_ib_state_chained_ib	de_payload;
	/* PFP IB base address which get pre-empted */
	uint32_t				DeIbBaseAddrLo;
	uint32_t				DeIbBaseAddrHi;
	uint32_t				reserved2[931];
}; /* Total of 4K Bytes */

#endif
@ -29,8 +29,4 @@ int cz_smu_init(struct amdgpu_device *adev);
int cz_smu_start(struct amdgpu_device *adev);
int cz_smu_fini(struct amdgpu_device *adev);

extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
extern const struct amd_ip_funcs iceland_dpm_ip_funcs;

#endif
@ -360,6 +360,8 @@
#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
#define	PACKET3_SWITCH_BUFFER				0x8B
#define	PACKET3_SET_RESOURCES				0xA0
#define	PACKET3_MAP_QUEUES				0xA2

#define	VCE_CMD_NO_OP		0x00000000
#define	VCE_CMD_END		0x00000001
@ -80,6 +80,18 @@ enum amd_clockgating_state {
	AMD_CG_STATE_UNGATE,
};

enum amd_dpm_forced_level {
	AMD_DPM_FORCED_LEVEL_AUTO = 0x1,
	AMD_DPM_FORCED_LEVEL_MANUAL = 0x2,
	AMD_DPM_FORCED_LEVEL_LOW = 0x4,
	AMD_DPM_FORCED_LEVEL_HIGH = 0x8,
	AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10,
	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20,
	AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40,
	AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80,
	AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100,
};

enum amd_powergating_state {
	AMD_PG_STATE_GATE = 0,
	AMD_PG_STATE_UNGATE,

@ -206,6 +218,8 @@ struct amd_ip_funcs {
	/* enable/disable pg for the IP block */
	int (*set_powergating_state)(void *handle,
				     enum amd_powergating_state state);
	/* get current clockgating status */
	void (*get_clockgating_state)(void *handle, u32 *flags);
};

#endif /* __AMD_SHARED_H__ */
@ -4552,6 +4552,14 @@
#define mmDP4_DP_DPHY_PRBS_CNTL                 0x4eb5
#define mmDP5_DP_DPHY_PRBS_CNTL                 0x4fb5
#define mmDP6_DP_DPHY_PRBS_CNTL                 0x54b5
#define mmDP_DPHY_SCRAM_CNTL                    0x4ab6
#define mmDP0_DP_DPHY_SCRAM_CNTL                0x4ab6
#define mmDP1_DP_DPHY_SCRAM_CNTL                0x4bb6
#define mmDP2_DP_DPHY_SCRAM_CNTL                0x4cb6
#define mmDP3_DP_DPHY_SCRAM_CNTL                0x4db6
#define mmDP4_DP_DPHY_SCRAM_CNTL                0x4eb6
#define mmDP5_DP_DPHY_SCRAM_CNTL                0x4fb6
#define mmDP6_DP_DPHY_SCRAM_CNTL                0x54b6
#define mmDP_DPHY_CRC_EN                        0x4ab7
#define mmDP0_DP_DPHY_CRC_EN                    0x4ab7
#define mmDP1_DP_DPHY_CRC_EN                    0x4bb7

@ -8690,6 +8690,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10

@ -4544,6 +4544,15 @@
#define mmDP6_DP_DPHY_PRBS_CNTL                 0x54b5
#define mmDP7_DP_DPHY_PRBS_CNTL                 0x56b5
#define mmDP8_DP_DPHY_PRBS_CNTL                 0x57b5
#define mmDP_DPHY_SCRAM_CNTL                    0x4ab6
#define mmDP0_DP_DPHY_SCRAM_CNTL                0x4ab6
#define mmDP1_DP_DPHY_SCRAM_CNTL                0x4bb6
#define mmDP2_DP_DPHY_SCRAM_CNTL                0x4cb6
#define mmDP3_DP_DPHY_SCRAM_CNTL                0x4db6
#define mmDP4_DP_DPHY_SCRAM_CNTL                0x4eb6
#define mmDP5_DP_DPHY_SCRAM_CNTL                0x4fb6
#define mmDP6_DP_DPHY_SCRAM_CNTL                0x54b6
#define mmDP8_DP_DPHY_SCRAM_CNTL                0x56b6
#define mmDP_DPHY_BS_SR_SWAP_CNTL               0x4adc
#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL           0x4adc
#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL           0x4bdc

@ -6004,6 +6004,8 @@
#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2
#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8

@ -8364,6 +8366,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000

@ -5776,6 +5776,15 @@
#define mmDP6_DP_DPHY_PRBS_CNTL                 0x54b5
#define mmDP7_DP_DPHY_PRBS_CNTL                 0x56b5
#define mmDP8_DP_DPHY_PRBS_CNTL                 0x57b5
#define mmDP_DPHY_SCRAM_CNTL                    0x4ab6
#define mmDP0_DP_DPHY_SCRAM_CNTL                0x4ab6
#define mmDP1_DP_DPHY_SCRAM_CNTL                0x4bb6
#define mmDP2_DP_DPHY_SCRAM_CNTL                0x4cb6
#define mmDP3_DP_DPHY_SCRAM_CNTL                0x4db6
#define mmDP4_DP_DPHY_SCRAM_CNTL                0x4eb6
#define mmDP5_DP_DPHY_SCRAM_CNTL                0x4fb6
#define mmDP6_DP_DPHY_SCRAM_CNTL                0x54b6
#define mmDP8_DP_DPHY_SCRAM_CNTL                0x56b6
#define mmDP_DPHY_BS_SR_SWAP_CNTL               0x4adc
#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL           0x4adc
#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL           0x4bdc

@ -7088,6 +7088,8 @@
#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2
#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8

@ -9626,6 +9628,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000

@ -3920,6 +3920,14 @@
#define mmDP4_DP_DPHY_PRBS_CNTL                 0x48d4
#define mmDP5_DP_DPHY_PRBS_CNTL                 0x4bd4
#define mmDP6_DP_DPHY_PRBS_CNTL                 0x4ed4
#define mmDP_DPHY_SCRAM_CNTL                    0x1cd5
#define mmDP0_DP_DPHY_SCRAM_CNTL                0x1cd5
#define mmDP1_DP_DPHY_SCRAM_CNTL                0x1fd5
#define mmDP2_DP_DPHY_SCRAM_CNTL                0x42d5
#define mmDP3_DP_DPHY_SCRAM_CNTL                0x45d5
#define mmDP4_DP_DPHY_SCRAM_CNTL                0x48d5
#define mmDP5_DP_DPHY_SCRAM_CNTL                0x4bd5
#define mmDP6_DP_DPHY_SCRAM_CNTL                0x4ed5
#define mmDP_DPHY_CRC_EN                        0x1cd6
#define mmDP0_DP_DPHY_CRC_EN                    0x1cd6
#define mmDP1_DP_DPHY_CRC_EN                    0x1fd6

@ -9214,6 +9214,10 @@
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00
#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10
@ -1,105 +0,0 @@
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#ifndef __SI_REG_H__
#define __SI_REG_H__

/* SI */
#define SI_DC_GPIO_HPD_MASK                      0x196c
#define SI_DC_GPIO_HPD_A                         0x196d
#define SI_DC_GPIO_HPD_EN                        0x196e
#define SI_DC_GPIO_HPD_Y                         0x196f

#define SI_GRPH_CONTROL                          0x1a01
#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
#       define SI_GRPH_DEPTH_8BPP                0
#       define SI_GRPH_DEPTH_16BPP               1
#       define SI_GRPH_DEPTH_32BPP               2
#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
#       define SI_ADDR_SURF_2_BANK               0
#       define SI_ADDR_SURF_4_BANK               1
#       define SI_ADDR_SURF_8_BANK               2
#       define SI_ADDR_SURF_16_BANK              3
#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
#       define SI_ADDR_SURF_BANK_WIDTH_1         0
#       define SI_ADDR_SURF_BANK_WIDTH_2         1
#       define SI_ADDR_SURF_BANK_WIDTH_4         2
#       define SI_ADDR_SURF_BANK_WIDTH_8         3
#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
/* 8 BPP */
#       define SI_GRPH_FORMAT_INDEXED            0
/* 16 BPP */
#       define SI_GRPH_FORMAT_ARGB1555           0
#       define SI_GRPH_FORMAT_ARGB565            1
#       define SI_GRPH_FORMAT_ARGB4444           2
#       define SI_GRPH_FORMAT_AI88               3
#       define SI_GRPH_FORMAT_MONO16             4
#       define SI_GRPH_FORMAT_BGRA5551           5
/* 32 BPP */
#       define SI_GRPH_FORMAT_ARGB8888           0
#       define SI_GRPH_FORMAT_ARGB2101010        1
#       define SI_GRPH_FORMAT_32BPP_DIG          2
#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
#       define SI_GRPH_FORMAT_BGRA1010102        4
#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
#       define SI_GRPH_FORMAT_RGB111110          6
#       define SI_GRPH_FORMAT_BGR101111          7
#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
#       define SI_GRPH_PIPE_CONFIG(x)            (((x) & 0x1f) << 24)
#       define SI_ADDR_SURF_P2                   0
#       define SI_ADDR_SURF_P4_8x16              4
#       define SI_ADDR_SURF_P4_16x16             5
#       define SI_ADDR_SURF_P4_16x32             6
#       define SI_ADDR_SURF_P4_32x32             7
#       define SI_ADDR_SURF_P8_16x16_8x16        8
#       define SI_ADDR_SURF_P8_16x32_8x16        9
#       define SI_ADDR_SURF_P8_32x32_8x16        10
#       define SI_ADDR_SURF_P8_16x32_16x16       11
#       define SI_ADDR_SURF_P8_32x32_16x16       12
#       define SI_ADDR_SURF_P8_32x32_16x32       13
#       define SI_ADDR_SURF_P8_32x64_32x32       14

#endif
@ -622,6 +622,8 @@ typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,

typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);

typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);

struct cgs_ops {
	/* memory management calls (similar to KFD interface) */
	cgs_gpu_mem_info_t gpu_mem_info;

@ -674,6 +676,7 @@ struct cgs_ops {
	/* get system info */
	cgs_query_system_info query_system_info;
	cgs_is_virtualization_enabled_t is_virtualization_enabled;
	cgs_enter_safe_mode enter_safe_mode;
};

struct cgs_os_ops; /* To be defined in OS-specific CGS header */

@ -779,4 +782,8 @@ struct cgs_device

#define cgs_is_virtualization_enabled(cgs_device) \
	CGS_CALL(is_virtualization_enabled, cgs_device)

#define cgs_enter_safe_mode(cgs_device, en) \
	CGS_CALL(enter_safe_mode, cgs_device, en)

#endif /* _CGS_COMMON_H */
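
A hedged sketch of how the new accessor is meant to be used from powerplay code: bracket register updates that may race with the RLC, mirroring the other CGS_CALL wrappers (the function below and the claim about what needs bracketing are assumptions, not part of this diff):

static void example_update_gfx_cg(struct cgs_device *cgs_device)
{
	/* ask the driver side to park the RLC before touching
	 * registers it shares with us */
	cgs_enter_safe_mode(cgs_device, true);

	/* ... clockgating register updates would go here ... */

	cgs_enter_safe_mode(cgs_device, false);
}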
File diff suppressed because it is too large
@ -151,7 +151,7 @@ static int thermal_interrupt_callback(void *private_data,
				      unsigned src_id, const uint32_t *iv_entry)
{
	/* TO DO handle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/
	printk("current thermal is out of range \n");
	pr_info("current thermal is out of range \n");
	return 0;
}
@ -60,9 +60,8 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
	pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
}

int eventmgr_init(struct pp_instance *handle)
int eventmgr_early_init(struct pp_instance *handle)
{
	int result = 0;
	struct pp_eventmgr *eventmgr;

	if (handle == NULL)

@ -79,12 +78,6 @@ int eventmgr_init(struct pp_instance *handle)
	eventmgr->pp_eventmgr_init = pem_init;
	eventmgr->pp_eventmgr_fini = pem_fini;

	return result;
}

int eventmgr_fini(struct pp_eventmgr *eventmgr)
{
	kfree(eventmgr);
	return 0;
}
@ -38,10 +38,13 @@
int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{

	if (eventmgr == NULL || eventmgr->hwmgr == NULL)
		return -EINVAL;

	if (pem_is_hw_access_blocked(eventmgr))
		return 0;

	phm_force_dpm_levels(eventmgr->hwmgr, AMD_DPM_FORCED_LEVEL_AUTO);
	phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level);

	return 0;
}
@ -240,10 +240,16 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)

static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
	/*we don't need an exit table here, because there is only D3 cold on Kv*/
	{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
	{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
	{
		.isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
		.tableFunction = cz_tf_uvd_power_gating_initialize
	},
	{
		.isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
		.tableFunction = cz_tf_vce_power_gating_initialize
	},
	/* to do { NULL, cz_tf_xdma_power_gating_enable }, */
	{ NULL, NULL }
	{ }
};
|
||||
|
||||
const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
|
||||
|
|
|
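These table rewrites replace positional struct initializers with C99 designated initializers, which stay correct if fields are reordered or added, and make the zeroed sentinel explicit ({ }). A standalone sketch of the pattern; the struct and function names here are invented for illustration, modeled on phm_master_table_item:

struct hwmgr;

/* Shape of a master-table entry in the style of phm_master_table_item. */
struct table_item {
	int (*isFunctionNeededInRuntimeTable)(struct hwmgr *hwmgr);
	int (*tableFunction)(struct hwmgr *hwmgr, void *input, void *output);
};

static int always_needed(struct hwmgr *hwmgr) { return 1; }
static int do_step(struct hwmgr *hwmgr, void *input, void *output) { return 0; }

static const struct table_item steps[] = {
	{
		.isFunctionNeededInRuntimeTable = always_needed,
		.tableFunction = do_step
	},
	/* Unnamed fields are implicitly zeroed, so the predicate can be omitted. */
	{ .tableFunction = do_step },
	{ }	/* all-zero sentinel terminates the table walk */
};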
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
-#include "pp_debug.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
@@ -38,7 +38,6 @@
#include "cz_hwmgr.h"
#include "power_state.h"
#include "cz_clockpowergating.h"
-#include "pp_debug.h"

#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
@@ -288,7 +287,7 @@ static int cz_init_dynamic_state_adjustment_rule_settings(
kzalloc(table_size, GFP_KERNEL);

if (NULL == table_clk_vlt) {
-printk(KERN_ERR "[ powerplay ] Can not allocate memory!\n");
+pr_err("Can not allocate memory!\n");
return -ENOMEM;
}
@@ -329,12 +328,12 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
&size, &frev, &crev);

if (crev != 9) {
-printk(KERN_ERR "[ powerplay ] Unsupported IGP table: %d %d\n", frev, crev);
+pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}

if (info == NULL) {
-printk(KERN_ERR "[ powerplay ] Could not retrieve the Integrated System Info Table!\n");
+pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
@@ -361,7 +360,7 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)

if (cz_hwmgr->sys_info.htc_tmp_lmt <=
cz_hwmgr->sys_info.htc_hyst_lmt) {
-printk(KERN_ERR "[ powerplay ] The htcTmpLmt should be larger than htcHystLmt.\n");
+pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
return -EINVAL;
}
@@ -723,7 +722,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,

clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
-printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
+pr_info("min_core_set_clock not set\n");

if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
@@ -888,13 +887,13 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
}

static const struct phm_master_table_item cz_set_power_state_list[] = {
-{NULL, cz_tf_update_sclk_limit},
-{NULL, cz_tf_set_deep_sleep_sclk_threshold},
-{NULL, cz_tf_set_watermark_threshold},
-{NULL, cz_tf_set_enabled_levels},
-{NULL, cz_tf_enable_nb_dpm},
-{NULL, cz_tf_update_low_mem_pstate},
-{NULL, NULL}
+{ .tableFunction = cz_tf_update_sclk_limit },
+{ .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
+{ .tableFunction = cz_tf_set_watermark_threshold },
+{ .tableFunction = cz_tf_set_enabled_levels },
+{ .tableFunction = cz_tf_enable_nb_dpm },
+{ .tableFunction = cz_tf_update_low_mem_pstate },
+{ }
};

static const struct phm_master_table_header cz_set_power_state_master = {
@@ -904,15 +903,15 @@ static const struct phm_master_table_header cz_set_power_state_master = {
};

static const struct phm_master_table_item cz_setup_asic_list[] = {
-{NULL, cz_tf_reset_active_process_mask},
-{NULL, cz_tf_upload_pptable_to_smu},
-{NULL, cz_tf_init_sclk_limit},
-{NULL, cz_tf_init_uvd_limit},
-{NULL, cz_tf_init_vce_limit},
-{NULL, cz_tf_init_acp_limit},
-{NULL, cz_tf_init_power_gate_state},
-{NULL, cz_tf_init_sclk_threshold},
-{NULL, NULL}
+{ .tableFunction = cz_tf_reset_active_process_mask },
+{ .tableFunction = cz_tf_upload_pptable_to_smu },
+{ .tableFunction = cz_tf_init_sclk_limit },
+{ .tableFunction = cz_tf_init_uvd_limit },
+{ .tableFunction = cz_tf_init_vce_limit },
+{ .tableFunction = cz_tf_init_acp_limit },
+{ .tableFunction = cz_tf_init_power_gate_state },
+{ .tableFunction = cz_tf_init_sclk_threshold },
+{ }
};

static const struct phm_master_table_header cz_setup_asic_master = {
@@ -957,10 +956,10 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
}

static const struct phm_master_table_item cz_power_down_asic_list[] = {
-{NULL, cz_tf_power_up_display_clock_sys_pll},
-{NULL, cz_tf_clear_nb_dpm_flag},
-{NULL, cz_tf_reset_cc6_data},
-{NULL, NULL}
+{ .tableFunction = cz_tf_power_up_display_clock_sys_pll },
+{ .tableFunction = cz_tf_clear_nb_dpm_flag },
+{ .tableFunction = cz_tf_reset_cc6_data },
+{ }
};

static const struct phm_master_table_header cz_power_down_asic_master = {
@@ -1068,8 +1067,8 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
}

static const struct phm_master_table_item cz_disable_dpm_list[] = {
-{ NULL, cz_tf_check_for_dpm_enabled},
-{NULL, NULL},
+{ .tableFunction = cz_tf_check_for_dpm_enabled },
+{ },
};
@@ -1080,13 +1079,13 @@ static const struct phm_master_table_header cz_disable_dpm_master = {
};

static const struct phm_master_table_item cz_enable_dpm_list[] = {
-{ NULL, cz_tf_check_for_dpm_disabled },
-{ NULL, cz_tf_program_voting_clients },
-{ NULL, cz_tf_start_dpm},
-{ NULL, cz_tf_program_bootup_state},
-{ NULL, cz_tf_enable_didt },
-{ NULL, cz_tf_reset_acp_boot_level },
-{NULL, NULL},
+{ .tableFunction = cz_tf_check_for_dpm_disabled },
+{ .tableFunction = cz_tf_program_voting_clients },
+{ .tableFunction = cz_tf_start_dpm },
+{ .tableFunction = cz_tf_program_bootup_state },
+{ .tableFunction = cz_tf_enable_didt },
+{ .tableFunction = cz_tf_reset_acp_boot_level },
+{ },
};

static const struct phm_master_table_header cz_enable_dpm_master = {
@@ -1162,13 +1161,13 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)

result = cz_initialize_dpm_defaults(hwmgr);
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] cz_initialize_dpm_defaults failed\n");
+pr_err("cz_initialize_dpm_defaults failed\n");
return result;
}

result = cz_get_system_info_data(hwmgr);
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] cz_get_system_info_data failed\n");
+pr_err("cz_get_system_info_data failed\n");
return result;
}
@@ -1177,40 +1176,40 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = phm_construct_table(hwmgr, &cz_setup_asic_master,
&(hwmgr->setup_asic));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to construct setup ASIC\n");
+pr_err("Fail to construct setup ASIC\n");
return result;
}

result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
&(hwmgr->power_down_asic));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to construct power down ASIC\n");
+pr_err("Fail to construct power down ASIC\n");
return result;
}

result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
&(hwmgr->disable_dynamic_state_management));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n");
+pr_err("Fail to disable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
&(hwmgr->enable_dynamic_state_management));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n");
+pr_err("Fail to enable_dynamic_state\n");
return result;
}
result = phm_construct_table(hwmgr, &cz_set_power_state_master,
&(hwmgr->set_power_state));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
+pr_err("Fail to construct set_power_state\n");
return result;
}
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;

result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
if (result != 0) {
-printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n");
+pr_err("Fail to construct enable_clock_power_gatings\n");
return result;
}
return result;
@@ -1218,9 +1217,15 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)

static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
-if (hwmgr != NULL && hwmgr->backend != NULL) {
+if (hwmgr != NULL) {
+phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings));
+phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
+phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
+phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
+phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
+phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
kfree(hwmgr->backend);
-kfree(hwmgr);
hwmgr->backend = NULL;
}
return 0;
}
@@ -1939,7 +1944,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.read_sensor = cz_read_sensor,
};

-int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
+int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &cz_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
@@ -316,7 +316,6 @@ struct cz_hwmgr {

struct pp_hwmgr;

-int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr);
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr);
@@ -35,7 +35,7 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
phm_table_function *function;

if (rt_table->function_list == NULL) {
-pr_debug("[ powerplay ] this function not implement!\n");
+pr_debug("this function not implement!\n");
return 0;
}
@@ -63,14 +63,14 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
void *temp_storage;

if (hwmgr == NULL || rt_table == NULL) {
-printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
+pr_err("Invalid Parameter!\n");
return -EINVAL;
}

if (0 != rt_table->storage_size) {
temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
if (temp_storage == NULL) {
-printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
+pr_err("Could not allocate table temporary storage\n");
return -ENOMEM;
}
} else {
@@ -95,7 +95,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
phm_table_function *rtf;

if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
-printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
+pr_err("Invalid Parameter!\n");
return -EINVAL;
}
@@ -116,7 +116,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
for (table_item = master_table->master_list;
NULL != table_item->tableFunction; table_item++) {
if ((rtf - run_time_list) > function_count) {
-printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
+pr_err("Check function results have changed\n");
kfree(run_time_list);
return -EINVAL;
}
@@ -128,7 +128,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr,
}

if ((rtf - run_time_list) > function_count) {
-printk(KERN_ERR "[ powerplay ] Check function results have changed\n");
+pr_err("Check function results have changed\n");
kfree(run_time_list);
return -EINVAL;
}
@@ -144,7 +144,7 @@ int phm_destroy_table(struct pp_hwmgr *hwmgr,
struct phm_runtime_table_header *rt_table)
{
if (hwmgr == NULL || rt_table == NULL) {
-printk(KERN_ERR "[ powerplay ] Invalid Parameter\n");
+pr_err("Invalid Parameter\n");
return -EINVAL;
}
@@ -20,11 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
-#include "pp_debug.h"

#define PHM_FUNC_CHECK(hw) \
do { \
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include "pp_debug.h"
#include "linux/delay.h"
#include <linux/types.h>
#include <linux/kernel.h>
@@ -29,13 +31,12 @@
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
-#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "pp_acpi.h"
#include "amd_acpi.h"

-extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
@@ -49,11 +50,11 @@ uint8_t convert_to_vid(uint16_t vddc)
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

-int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
+int hwmgr_early_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;

-if ((handle == NULL) || (pp_init == NULL))
+if (handle == NULL)
return -EINVAL;

hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
@@ -62,19 +63,20 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)

handle->hwmgr = hwmgr;
hwmgr->smumgr = handle->smu_mgr;
-hwmgr->device = pp_init->device;
-hwmgr->chip_family = pp_init->chip_family;
-hwmgr->chip_id = pp_init->chip_id;
+hwmgr->device = handle->device;
+hwmgr->chip_family = handle->chip_family;
+hwmgr->chip_id = handle->chip_id;
+hwmgr->feature_mask = handle->feature_mask;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
+hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);

switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CZ:
-cz_hwmgr_init(hwmgr);
+cz_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
@@ -102,7 +104,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
default:
return -EINVAL;
}
-smu7_hwmgr_init(hwmgr);
+smu7_init_function_pointers(hwmgr);
break;
default:
return -EINVAL;
@@ -111,28 +113,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
return 0;
}

-int hwmgr_fini(struct pp_hwmgr *hwmgr)
-{
-if (hwmgr == NULL || hwmgr->ps == NULL)
-return -EINVAL;
-
-/* do hwmgr finish*/
-kfree(hwmgr->hardcode_pp_table);
-
-kfree(hwmgr->backend);
-
-kfree(hwmgr->start_thermal_controller.function_list);
-
-kfree(hwmgr->set_temperature_range.function_list);
-
-kfree(hwmgr->ps);
-kfree(hwmgr->current_ps);
-kfree(hwmgr->request_ps);
-kfree(hwmgr);
-return 0;
-}
-
-int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
+static int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
{
int result;
unsigned int i;
@@ -156,12 +137,20 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
return -ENOMEM;

hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
-if (hwmgr->request_ps == NULL)
+if (hwmgr->request_ps == NULL) {
+kfree(hwmgr->ps);
+hwmgr->ps = NULL;
return -ENOMEM;
+}

hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
-if (hwmgr->current_ps == NULL)
+if (hwmgr->current_ps == NULL) {
+kfree(hwmgr->request_ps);
+kfree(hwmgr->ps);
+hwmgr->request_ps = NULL;
+hwmgr->ps = NULL;
return -ENOMEM;
+}

state = hwmgr->ps;
@@ -181,10 +170,77 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
state = (struct pp_power_state *)((unsigned long)state + size);
}

return 0;
}

+static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr)
+{
+if (hwmgr == NULL)
+return -EINVAL;
+
+kfree(hwmgr->current_ps);
+kfree(hwmgr->request_ps);
+kfree(hwmgr->ps);
+hwmgr->request_ps = NULL;
+hwmgr->ps = NULL;
+hwmgr->current_ps = NULL;
+return 0;
+}
+
+int hwmgr_hw_init(struct pp_instance *handle)
+{
+struct pp_hwmgr *hwmgr;
+int ret = 0;
+
+if (handle == NULL)
+return -EINVAL;
+
+hwmgr = handle->hwmgr;
+
+if (hwmgr->pptable_func == NULL ||
+hwmgr->pptable_func->pptable_init == NULL ||
+hwmgr->hwmgr_func->backend_init == NULL)
+return -EINVAL;
+
+ret = hwmgr->pptable_func->pptable_init(hwmgr);
+if (ret)
+goto err;
+
+ret = hwmgr->hwmgr_func->backend_init(hwmgr);
+if (ret)
+goto err1;
+
+ret = hw_init_power_state_table(hwmgr);
+if (ret)
+goto err2;
+return 0;
+err2:
+if (hwmgr->hwmgr_func->backend_fini)
+hwmgr->hwmgr_func->backend_fini(hwmgr);
+err1:
+if (hwmgr->pptable_func->pptable_fini)
+hwmgr->pptable_func->pptable_fini(hwmgr);
+err:
+pr_err("amdgpu: powerplay initialization failed\n");
+return ret;
+}
+
+int hwmgr_hw_fini(struct pp_instance *handle)
+{
+struct pp_hwmgr *hwmgr;
+
+if (handle == NULL)
+return -EINVAL;
+
+hwmgr = handle->hwmgr;
+
+if (hwmgr->hwmgr_func->backend_fini)
+hwmgr->hwmgr_func->backend_fini(hwmgr);
+if (hwmgr->pptable_func->pptable_fini)
+hwmgr->pptable_func->pptable_fini(hwmgr);
+return hw_fini_power_state_table(hwmgr);
+}
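hwmgr_hw_init above uses the kernel's usual goto-based unwind ladder: each successfully initialized stage gets a label that tears it down, and a failure jumps to the label for the last stage that succeeded, falling through the earlier teardowns in reverse order. A generic, compilable sketch of the pattern; the stage_* functions are invented for illustration, mirroring pptable_init -> backend_init -> hw_init_power_state_table:

static int stage_a_init(void) { return 0; }
static void stage_a_fini(void) { }
static int stage_b_init(void) { return 0; }
static void stage_b_fini(void) { }
static int stage_c_init(void) { return 0; }

static int bring_up(void)
{
	int ret;

	ret = stage_a_init();
	if (ret)
		goto err;	/* nothing to unwind yet */

	ret = stage_b_init();
	if (ret)
		goto err_a;	/* unwind stage A only */

	ret = stage_c_init();
	if (ret)
		goto err_b;	/* unwind B, then fall through to A */

	return 0;

err_b:
	stage_b_fini();	/* falls through to err_a on purpose */
err_a:
	stage_a_fini();
err:
	return ret;
}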
/**
* Returns once the part of the register indicated by the mask has
@@ -197,7 +253,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t cur_value;

if (hwmgr == NULL || hwmgr->device == NULL) {
-printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
+pr_err("Invalid Hardware Manager!");
return -EINVAL;
}
@@ -227,7 +283,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t mask)
{
if (hwmgr == NULL || hwmgr->device == NULL) {
-printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
+pr_err("Invalid Hardware Manager!");
return;
}
@@ -288,7 +344,7 @@ int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)

memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
kfree(table);
table = NULL;
return 0;
}
@@ -549,7 +605,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
table_clk_vlt = kzalloc(table_size, GFP_KERNEL);

if (NULL == table_clk_vlt) {
-printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
+pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n");
return -ENOMEM;
} else {
table_clk_vlt->count = 4;
@@ -569,21 +625,6 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
return 0;
}

-int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
-if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
-kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
-hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
-}
-
-if (NULL != hwmgr->backend) {
-kfree(hwmgr->backend);
-hwmgr->backend = NULL;
-}
-
-return 0;
-}
-
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
uint32_t level = 0;
@@ -625,7 +666,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
return;
}
}
-printk(KERN_ERR "DAL requested level can not"
+pr_err("DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
@@ -683,14 +724,14 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)

int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
-if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
+if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
else
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);

-if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) {
+if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -701,7 +742,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
}
-hwmgr->feature_mask = amdgpu_pp_feature_mask;

return 0;
}
@@ -727,16 +767,9 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,

int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
-/* power tune caps Assume disabled */
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
-phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-PHM_PlatformCaps_DBRamping);
-phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-PHM_PlatformCaps_TDRamping);
-phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-PHM_PlatformCaps_TCPRamping);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
@@ -746,9 +779,19 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);

-if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
+if (hwmgr->chip_id != CHIP_POLARIS10)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
+
+if (hwmgr->chip_id != CHIP_POLARIS11) {
+phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+PHM_PlatformCaps_DBRamping);
+phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+PHM_PlatformCaps_TDRamping);
+phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+PHM_PlatformCaps_TCPRamping);
+}
return 0;
}
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>

#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
-#include "pp_debug.h"
#include "ppevvmath.h"

#define MEM_ID_MASK 0xff000000
@@ -145,10 +145,10 @@ int atomctrl_initialize_mc_reg_table(
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

if (module_index >= vram_info->ucNumOfVRAMModule) {
-printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
+pr_err("Invalid VramInfo table.");
result = -1;
} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
-printk(KERN_ERR "[ powerplay ] Invalid VramInfo table.");
+pr_err("Invalid VramInfo table.");
result = -1;
}
@@ -688,7 +688,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
break;
default:
-printk(KERN_ERR "DPM Level not supported\n");
+pr_err("DPM Level not supported\n");
fPowerDPMx = Convert_ULONG_ToFraction(1);
fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
}
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>

#include "process_pptables_v1_0.h"
#include "ppatomctrl.h"
#include "atombios.h"
-#include "pp_debug.h"
#include "hwmgr.h"
#include "cgs_common.h"
#include "pptable_v1_0.h"
@@ -535,7 +535,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
-printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
Disregarding the excess entries... \n");

pcie_table->count = pcie_count;
@@ -577,7 +577,7 @@ static int get_pcie_table(
if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
else
-printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
+pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
Disregarding the excess entries... \n");

pcie_table->count = pcie_count;
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,7 +28,6 @@
#include "processpptables.h"
#include <atom-types.h>
#include <atombios.h>
-#include "pp_debug.h"
#include "pptable.h"
#include "power_state.h"
#include "hwmgr.h"
@@ -22,7 +22,7 @@
*/

#ifndef _SMU7_CLOCK_POWER_GATING_H_
-#define _SMU7_CLOCK__POWER_GATING_H_
+#define _SMU7_CLOCK_POWER_GATING_H_

#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
@@ -20,13 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <asm/div64.h>
#include "linux/delay.h"
#include "pp_acpi.h"
-#include "pp_debug.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
@@ -40,6 +40,8 @@

#include "hwmgr.h"
#include "smu7_hwmgr.h"
+#include "smu7_smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
@@ -88,6 +90,8 @@ enum DPM_EVENT_SRC {
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask);

static struct smu7_power_state *cast_phw_smu7_power_state(
struct pp_hw_power_state *hw_ps)
@@ -994,7 +998,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
SWRST_COMMAND_1, RESETLC, 0x0);

if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
-printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
+pr_err("Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
}
@@ -1079,7 +1083,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)

switch (sources) {
default:
-printk(KERN_ERR "Unknown throttling event sources.");
+pr_err("Unknown throttling event sources.");
/* fall through */
case 0:
protection = false;
@@ -1292,6 +1296,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable SMC CAC!", result = tmp_result);

+tmp_result = smu7_disable_didt_config(hwmgr);
+PP_ASSERT_WITH_CODE((tmp_result == 0),
+"Failed to disable DIDT!", result = tmp_result);
+
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -1499,7 +1507,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
data->vddcgfx_leakage.count++;
}
} else {
-printk("Error retrieving EVV voltage value!\n");
+pr_info("Error retrieving EVV voltage value!\n");
}
}
} else {
@@ -1527,7 +1535,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (vddc >= 2000 || vddc == 0)
return -EINVAL;
} else {
-printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+pr_warning("failed to retrieving EVV voltage!\n");
continue;
}
@@ -1567,7 +1575,7 @@ static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}

if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
}

/**
@@ -2032,7 +2040,7 @@ static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}

if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
}
@@ -2267,6 +2275,21 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
return 0;
}

+static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
+kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+}
+pp_smu7_thermal_fini(hwmgr);
+if (NULL != hwmgr->backend) {
+kfree(hwmgr->backend);
+hwmgr->backend = NULL;
+}
+
+return 0;
+}
+
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
@@ -2277,6 +2300,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return -ENOMEM;

hwmgr->backend = data;
+pp_smu7_thermal_initialize(hwmgr);

smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
@@ -2285,7 +2309,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
result = smu7_get_evv_voltages(hwmgr);

if (result) {
-printk("Get EVV Voltage Failed. Abort Driver loading!\n");
+pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
return -EINVAL;
}
@@ -2334,7 +2358,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
smu7_thermal_parameter_init(hwmgr);
} else {
/* Ignore return value in here, we are cleaning up a mess. */
-phm_hwmgr_backend_fini(hwmgr);
+smu7_hwmgr_backend_fini(hwmgr);
}

return 0;
@@ -2466,36 +2490,155 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
}

return 0;

}

static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
uint32_t percentage;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
int32_t tmp_mclk;
int32_t tmp_sclk;
int32_t count;

if (golden_dpm_table->mclk_table.count < 1)
return -EINVAL;

percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

if (golden_dpm_table->mclk_table.count == 1) {
percentage = 70;
tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
} else {
tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
*mclk_mask = golden_dpm_table->mclk_table.count - 2;
}

tmp_sclk = tmp_mclk * percentage / 100;

if (hwmgr->pp_table_version == PP_TABLE_V0) {
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
count >= 0; count--) {
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;

if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);

for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;

if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
}

if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
*mclk_mask = 0;
else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*mclk_mask = golden_dpm_table->mclk_table.count - 1;

*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
return 0;
}

static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

if (level == hwmgr->dpm_level)
return ret;

if (!(hwmgr->dpm_level & profile_mode_mask)) {
/* enter profile mode, save current level, disable gfx cg*/
if (level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
}
} else {
/* exit profile mode, restore level, enable gfx cg*/
if (!(level & profile_mode_mask)) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
level = hwmgr->saved_dpm_level;
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_GATE);
}
}

switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = smu7_force_dpm_lowest(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu7_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
if (ret)
return ret;
hwmgr->dpm_level = level;
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}

hwmgr->dpm_level = level;
if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
else
smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);

return ret;
return 0;
}

static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
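smu7_force_dpm_level treats the forced-level values as single-bit flags, so one OR-mask plus a bitwise AND classifies any level as profile or non-profile in a single test. A standalone sketch of the check; the enum values below are illustrative, not the driver's actual numbers:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative single-bit levels in the style of AMD_DPM_FORCED_LEVEL_*. */
enum forced_level {
	LEVEL_AUTO		= 1 << 0,
	LEVEL_LOW		= 1 << 1,
	LEVEL_HIGH		= 1 << 2,
	LEVEL_MANUAL		= 1 << 3,
	LEVEL_PROFILE_STANDARD	= 1 << 4,
	LEVEL_PROFILE_MIN_SCLK	= 1 << 5,
	LEVEL_PROFILE_MIN_MCLK	= 1 << 6,
	LEVEL_PROFILE_PEAK	= 1 << 7,
};

static bool is_profile_mode(uint32_t level)
{
	const uint32_t profile_mode_mask = LEVEL_PROFILE_STANDARD |
					   LEVEL_PROFILE_MIN_SCLK |
					   LEVEL_PROFILE_MIN_MCLK |
					   LEVEL_PROFILE_PEAK;

	/* One AND distinguishes profile levels from auto/low/high/manual. */
	return (level & profile_mode_mask) != 0;
}

Because each level occupies its own bit, the same idiom covers the force_clock_level guard as well: ANDing against AUTO|LOW|HIGH rejects exactly those modes while still permitting MANUAL and the profile levels.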
@@ -2898,11 +3041,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
-printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+pr_err("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].vddci !=
data->vbios_boot_state.vddci_bootup_value)
-printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3046,11 +3189,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
-printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+pr_err("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].v !=
data->vbios_boot_state.vddci_bootup_value)
-printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3590,9 +3733,9 @@ static int smu7_notify_link_speed_change_after_state_change(

if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
if (PP_PCIEGen2 == target_link_speed)
-printk("PSPP request to switch to Gen2 from Gen3 Failed!");
+pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
else
-printk("PSPP request to switch to Gen1 from Gen2 Failed!");
+pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
}
}
@@ -4029,7 +4172,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

-if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+AMD_DPM_FORCED_LEVEL_LOW |
+AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;

switch (type) {
@@ -4324,9 +4469,35 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
return 0;
}

+static int smu7_request_firmware(struct pp_hwmgr *hwmgr)
+{
+int ret;
+struct cgs_firmware_info info = {0};
+
+ret = cgs_get_firmware_info(hwmgr->device,
+smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+&info);
+if (ret || !info.kptr)
+return -EINVAL;
+
+return 0;
+}
+
+static int smu7_release_firmware(struct pp_hwmgr *hwmgr)
+{
+int ret;
+
+ret = cgs_rel_firmware(hwmgr->device,
+smu7_convert_fw_type_to_cgs(UCODE_ID_SMU));
+if (ret)
+return -EINVAL;
+
+return 0;
+}
+
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
-.backend_fini = &phm_hwmgr_backend_fini,
+.backend_fini = &smu7_hwmgr_backend_fini,
.asic_setup = &smu7_setup_asic_task,
.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
@@ -4371,6 +4542,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
.dynamic_state_management_disable = smu7_disable_dpm_tasks,
+.request_firmware = smu7_request_firmware,
+.release_firmware = smu7_release_firmware,
};

uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
@@ -4390,7 +4563,7 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
return i;
}

-int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
int ret = 0;
@@ -4400,7 +4573,6 @@ int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
else if (hwmgr->pp_table_version == PP_TABLE_V1)
hwmgr->pptable_func = &pptable_v1_0_funcs;

-pp_smu7_thermal_initialize(hwmgr);
return ret;
}
|
|
@ -20,17 +20,19 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "pp_debug.h"
|
||||
#include "hwmgr.h"
|
||||
#include "smumgr.h"
|
||||
#include "smu7_hwmgr.h"
|
||||
#include "smu7_powertune.h"
|
||||
#include "pp_debug.h"
|
||||
#include "smu7_common.h"
|
||||
|
||||
#define VOLTAGE_SCALE 4
|
||||
|
||||
static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
|
||||
|
||||
static uint32_t Polaris11_DIDTBlock_Info = SQ_PCC_MASK | TCP_IR_MASK | TD_PCC_MASK;
|
||||
|
||||
static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
|
||||
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
* Offset Mask Shift Value Type
|
||||
|
@ -261,9 +263,9 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
|
|||
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
@ -271,12 +273,12 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
|
|||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
|
@ -284,8 +286,8 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
|
|||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
|
||||
|
@ -365,6 +367,105 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
|
|||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ 0xFFFFFFFF }
|
||||
};
|
||||
|
||||
static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
|
||||
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
* Offset Mask Shift Value Type
|
||||
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
*/
|
||||
{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
{ ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
|
||||
|
@@ -379,49 +480,55 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{
	uint32_t en = enable ? 1 : 0;
+	uint32_t block_en = 0;
	int32_t result = 0;
+	uint32_t didt_block;
	uint32_t data;

-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
-		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
-		data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
-		data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
-		DIDTBlock_Info &= ~SQ_Enable_MASK;
-		DIDTBlock_Info |= en << SQ_Enable_SHIFT;
-	}
+	if (hwmgr->chip_id == CHIP_POLARIS11)
+		didt_block = Polaris11_DIDTBlock_Info;
+	else
+		didt_block = DIDTBlock_Info;

-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
-		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
-		data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
-		data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
-		DIDTBlock_Info &= ~DB_Enable_MASK;
-		DIDTBlock_Info |= en << DB_Enable_SHIFT;
-	}
+	block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0;

-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
-		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
-		data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
-		data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
-		DIDTBlock_Info &= ~TD_Enable_MASK;
-		DIDTBlock_Info |= en << TD_Enable_SHIFT;
-	}
+	data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+	data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+	data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+	didt_block &= ~SQ_Enable_MASK;
+	didt_block |= block_en << SQ_Enable_SHIFT;
+
+	block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0;
+
+	data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+	data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+	data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+	didt_block &= ~DB_Enable_MASK;
+	didt_block |= block_en << DB_Enable_SHIFT;
+
+	block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0;
+	data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+	data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+	data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+	didt_block &= ~TD_Enable_MASK;
+	didt_block |= block_en << TD_Enable_SHIFT;
+
+	block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0;
+
+	data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+	data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+	data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+	didt_block &= ~TCP_Enable_MASK;
+	didt_block |= block_en << TCP_Enable_SHIFT;

-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
-		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
-		data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
-		data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
-		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
-		DIDTBlock_Info &= ~TCP_Enable_MASK;
-		DIDTBlock_Info |= en << TCP_Enable_SHIFT;
-	}

	if (enable)
-		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block);

	return result;
}
@@ -498,7 +605,6 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);

	if (result == 0)
		num_se = sys_info.value;
@@ -507,7 +613,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {

-		/* TO DO Pre DIDT disable clock gating */
+		cgs_enter_safe_mode(hwmgr->device, true);
		value = 0;
		value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
		for (count = 0; count < num_se; count++) {
@@ -521,11 +627,16 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
			result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-		} else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
+		} else if (hwmgr->chip_id == CHIP_POLARIS11) {
			result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
			result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+		} else if (hwmgr->chip_id == CHIP_POLARIS12) {
+			result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+			result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
+			PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
		}
	}
	cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
@@ -533,7 +644,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
		result = smu7_enable_didt(hwmgr, true);
		PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);

-		/* TO DO Post DIDT enable clock gating */
+		if (hwmgr->chip_id == CHIP_POLARIS11) {
+			result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_EnableDpmDidt));
+			PP_ASSERT_WITH_CODE((0 == result),
+					"Failed to enable DPM DIDT.", return result);
+		}
+		cgs_enter_safe_mode(hwmgr->device, false);
	}

	return 0;
@@ -547,11 +664,20 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
			phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
-		/* TO DO Pre DIDT disable clock gating */
+
+		cgs_enter_safe_mode(hwmgr->device, true);
+
		result = smu7_enable_didt(hwmgr, false);
-		PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
-		/* TO DO Post DIDT enable clock gating */
+		PP_ASSERT_WITH_CODE((result == 0),
+				"Post DIDT enable clock gating failed.",
+				return result);
+		if (hwmgr->chip_id == CHIP_POLARIS11) {
+			result = smum_send_msg_to_smc(hwmgr->smumgr,
+					(uint16_t)(PPSMC_MSG_DisableDpmDidt));
+			PP_ASSERT_WITH_CODE((0 == result),
+					"Failed to disable DPM DIDT.", return result);
+		}
+		cgs_enter_safe_mode(hwmgr->device, false);
	}

	return 0;
@@ -651,7 +777,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
					POWERCONTAINMENT_FEATURE_PkgPwrLimit;

			if (smu7_set_power_limit(hwmgr, default_limit))
-				printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
+				pr_err("Failed to set Default Power Limit in SMC!");
		}
	}
}
@@ -506,18 +506,18 @@ static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
static const struct phm_master_table_item
phm_thermal_start_thermal_controller_master_list[] = {
-	{NULL, tf_smu7_thermal_initialize},
-	{NULL, tf_smu7_thermal_set_temperature_range},
-	{NULL, tf_smu7_thermal_enable_alert},
-	{NULL, smum_thermal_avfs_enable},
+	{ .tableFunction = tf_smu7_thermal_initialize },
+	{ .tableFunction = tf_smu7_thermal_set_temperature_range },
+	{ .tableFunction = tf_smu7_thermal_enable_alert },
+	{ .tableFunction = smum_thermal_avfs_enable },
	/* We should restrict performance levels to low before we halt the SMC.
	 * On the other hand we are still in boot state when we do this
	 * so it would be pointless.
	 * If this assumption changes we have to revisit this table.
	 */
-	{NULL, smum_thermal_setup_fan_table},
-	{NULL, tf_smu7_thermal_start_smc_fan_control},
-	{NULL, NULL}
+	{ .tableFunction = smum_thermal_setup_fan_table },
+	{ .tableFunction = tf_smu7_thermal_start_smc_fan_control },
+	{ }
};

static const struct phm_master_table_header
@@ -529,10 +529,10 @@ phm_thermal_start_thermal_controller_master = {
static const struct phm_master_table_item
phm_thermal_set_temperature_range_master_list[] = {
-	{NULL, tf_smu7_thermal_disable_alert},
-	{NULL, tf_smu7_thermal_set_temperature_range},
-	{NULL, tf_smu7_thermal_enable_alert},
-	{NULL, NULL}
+	{ .tableFunction = tf_smu7_thermal_disable_alert },
+	{ .tableFunction = tf_smu7_thermal_set_temperature_range },
+	{ .tableFunction = tf_smu7_thermal_enable_alert },
+	{ }
};

static const struct phm_master_table_header
@@ -575,3 +575,9 @@ int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
	return result;
}

+void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr)
+{
+	phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+	phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller));
+	return;
+}
@@ -47,6 +47,7 @@ extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
@@ -29,7 +29,10 @@
#include "amd_shared.h"
#include "cgs_common.h"

+extern int amdgpu_dpm;
extern const struct amd_ip_funcs pp_ip_funcs;
extern const struct amd_powerplay_funcs pp_dpm_funcs;

+#define PP_DPM_DISABLED 0xCCCC
+
enum amd_pp_sensors {
	AMDGPU_PP_SENSOR_GFX_SCLK = 0,
@@ -135,17 +138,12 @@ enum amd_pp_event {
	AMD_PP_EVENT_MAX
};

-enum amd_dpm_forced_level {
-	AMD_DPM_FORCED_LEVEL_AUTO = 0,
-	AMD_DPM_FORCED_LEVEL_LOW = 1,
-	AMD_DPM_FORCED_LEVEL_HIGH = 2,
-	AMD_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
struct amd_pp_init {
	struct cgs_device *device;
	uint32_t chip_family;
	uint32_t chip_id;
+	bool pm_en;
+	uint32_t feature_mask;
};

enum amd_pp_display_config_type{
@@ -371,10 +369,10 @@ struct amd_powerplay {
	const struct amd_powerplay_funcs *pp_funcs;
};

-int amd_powerplay_init(struct amd_pp_init *pp_init,
-		struct amd_powerplay *amd_pp);
+int amd_powerplay_create(struct amd_pp_init *pp_init,
+		void **handle);

-int amd_powerplay_fini(void *handle);
+int amd_powerplay_destroy(void *handle);

int amd_powerplay_reset(void *handle);
@@ -119,7 +119,6 @@ struct pp_eventmgr {
	void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
};

-int eventmgr_init(struct pp_instance *handle);
-int eventmgr_fini(struct pp_eventmgr *eventmgr);
+int eventmgr_early_init(struct pp_instance *handle);

#endif /* _EVENTMGR_H_ */
@@ -38,8 +38,6 @@ struct pp_hwmgr;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;

-extern unsigned amdgpu_pp_feature_mask;
-
#define VOLTAGE_SCALE 4

uint8_t convert_to_vid(uint16_t vddc);
@@ -358,6 +356,8 @@ struct pp_hwmgr_func {
	int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
	int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
	int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
+	int (*request_firmware)(struct pp_hwmgr *hwmgr);
+	int (*release_firmware)(struct pp_hwmgr *hwmgr);
};

struct pp_table_func {
@@ -612,6 +612,7 @@ struct pp_hwmgr {
	uint32_t num_vce_state_tables;

	enum amd_dpm_forced_level dpm_level;
+	enum amd_dpm_forced_level saved_dpm_level;
	bool block_hw_access;
	struct phm_gfx_arbiter gfx_arbiter;
	struct phm_acp_arbiter acp_arbiter;
@@ -651,19 +652,12 @@ struct pp_hwmgr {
	uint32_t feature_mask;
};

-extern int hwmgr_init(struct amd_pp_init *pp_init,
-		struct pp_instance *handle);
-
-extern int hwmgr_fini(struct pp_hwmgr *hwmgr);
-
-extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
-
+extern int hwmgr_early_init(struct pp_instance *handle);
+extern int hwmgr_hw_init(struct pp_instance *handle);
+extern int hwmgr_hw_fini(struct pp_instance *handle);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			uint32_t value, uint32_t mask);

extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
			uint32_t indirect_port,
			uint32_t index,
@@ -692,11 +686,10 @@ extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level
extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table,
			uint16_t virtual_voltage_id, int32_t *sclk);
extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);

-extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
			uint32_t sclk, uint16_t id, uint16_t *voltage);