Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next

Last set of radeon and amdgpu changes for 4.9.  This is
mostly just the powerplay cleanup for dGPUs.  Beyond that,
just misc code cleanups and bug fixes.

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (49 commits)
  drm/amd/amdgpu: Clean up afmt allocation in DCEv6. (v2)
  drm/amd/amdgpu: Remove division from vblank_wait
  drm/radeon/atif: Send a hotplug event when we get dgpu display request
  drm/radeon/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu/atif: Send a hotplug event when we get dgpu display request
  drm/amdgpu/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu: bump version for new vce packet support
  drm/amdgpu/vce: allow the clock table packet
  drm/amdgpu:cleanup virt related define
  drm/amdgpu: use powerplay module for dgpu in Vi.
  drm/amdgpu: set gfx clock gating for tonga/polaris.
  drm/amdgpu: set system clock gating for tonga/polaris.
  drm/amd/powerplay: export function to help to set cg by smu.
  drm/amdgpu: avoid out of bounds access on array interrupt_status_offsets
  drm/amdgpu: mark symbols static where possible
  drm/amdgpu: remove unused functions
  drm/amd/powerplay:  Replace per-asic print_performance with generic
  drm/radeon: narrow asic_init for virtualization
  drm/amdgpu:add fw version entry to info
  drm/amdgpu:determine if vPost is needed indeed
  ...
Committed by Dave Airlie on 2016-09-28 11:27:05 +10:00
commit 9f4ef05bcd
106 changed files with 18547 additions and 35826 deletions


@@ -52,10 +52,7 @@ amdgpu-y += \
 amdgpu-y += \
 amdgpu_dpm.o \
 amdgpu_powerplay.o \
-cz_smc.o cz_dpm.o \
-tonga_smc.o tonga_dpm.o \
-fiji_smc.o fiji_dpm.o \
-iceland_smc.o iceland_dpm.o
+cz_smc.o cz_dpm.o
 
 # add DCE block
 amdgpu-y += \


@@ -57,6 +57,7 @@
 #include "amdgpu_acp.h"
 #include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
 
 /*
  * Modules parameters.

@@ -1827,6 +1828,7 @@ struct amdgpu_asic_funcs {
 bool (*read_disabled_bios)(struct amdgpu_device *adev);
 bool (*read_bios_from_rom)(struct amdgpu_device *adev,
 u8 *bios, u32 length_bytes);
+void (*detect_hw_virtualization) (struct amdgpu_device *adev);
 int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 u32 sh_num, u32 reg_offset, u32 *value);
 void (*set_vga_state)(struct amdgpu_device *adev, bool state);

@@ -1836,8 +1838,6 @@ struct amdgpu_asic_funcs {
 /* MM block clocks */
 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
-/* query virtual capabilities */
-u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 /* static power management */
 int (*get_pcie_lanes)(struct amdgpu_device *adev);
 void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);

@@ -1933,16 +1933,6 @@ struct amdgpu_atcs {
 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
-
-struct amdgpu_virtualization {
-bool supports_sr_iov;
-bool is_virtual;
-u32 caps;
-};
-
 /*
  * Core structure, functions and helpers.
  */

@@ -2260,12 +2250,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
 #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))

@@ -2323,6 +2313,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+((adev)->pp_enabled ? \
+(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+-EINVAL)
+
 #define amdgpu_dpm_get_temperature(adev) \
 ((adev)->pp_enabled ? \
 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \

@@ -2374,11 +2369,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
 (adev)->pm.funcs->powergate_vce((adev), (g)))
 
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-((adev)->pp_enabled ? \
-(adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-(adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
 (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

@@ -2460,11 +2450,13 @@ void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
 #endif
 
 /*
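The new amdgpu_dpm_read_sensor() macro above evaluates to -EINVAL when powerplay is not enabled and otherwise forwards to the powerplay read_sensor hook, which returns 0 on success. A minimal kernel-context sketch of a caller (the function name is hypothetical; the 0-on-success convention and the 10 kHz clock units are taken from the amdgpu_pm.c hunk later in this diff):

/* Hedged sketch, not part of the patch: assumes the usual amdgpu build
 * context (amdgpu.h plus the powerplay headers that define the
 * AMDGPU_PP_SENSOR_* indices). */
static int example_read_sclk_mhz(struct amdgpu_device *adev, u32 *sclk_mhz)
{
	int32_t value;
	int r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value);
	if (r)
		return r;		/* -EINVAL when powerplay is disabled */

	*sclk_mhz = value / 100;	/* the sensor reports the clock in 10 kHz units */
	return 0;
}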


@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>

@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
 #endif
 }
 }
+if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+if ((adev->flags & AMD_IS_PX) &&
+amdgpu_atpx_dgpu_req_power_for_displays()) {
+pm_runtime_get_sync(adev->ddev->dev);
+/* Just fire off a uevent and let userspace tell us what to do */
+drm_helper_hpd_irq_event(adev->ddev);
+pm_runtime_mark_last_busy(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev->ddev->dev);
+}
+}
 /* TODO: check other events */
 
 /* We've handled the event, stop the notifier chain. The ACPI interface


@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
 return r;
 }
 
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
-switch (p) {
-case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
-default: return AMDGPU_GEM_DOMAIN_GTT;
-}
-}
-
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 void **mem_obj, uint64_t *gpu_addr,
 void **cpu_ptr)


@@ -29,6 +29,7 @@ struct amdgpu_atpx {
 acpi_handle handle;
 struct amdgpu_atpx_functions functions;
 bool is_hybrid;
+bool dgpu_req_power_for_displays;
 };
 
 static struct amdgpu_atpx_priv {

@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
 return amdgpu_atpx_priv.atpx.is_hybrid;
 }
 
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *

@@ -213,6 +218,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 atpx->is_hybrid = true;
 }
 
+atpx->dgpu_req_power_for_displays = false;
+if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+atpx->dgpu_req_power_for_displays = true;
+
 return 0;
 }


@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
 return -EINVAL;
 }
 
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+enum cgs_ucode_id type)
+{
+CGS_FUNC_ADEV;
+uint16_t fw_version;
+
+switch (type) {
+case CGS_UCODE_ID_SDMA0:
+fw_version = adev->sdma.instance[0].fw_version;
+break;
+case CGS_UCODE_ID_SDMA1:
+fw_version = adev->sdma.instance[1].fw_version;
+break;
+case CGS_UCODE_ID_CP_CE:
+fw_version = adev->gfx.ce_fw_version;
+break;
+case CGS_UCODE_ID_CP_PFP:
+fw_version = adev->gfx.pfp_fw_version;
+break;
+case CGS_UCODE_ID_CP_ME:
+fw_version = adev->gfx.me_fw_version;
+break;
+case CGS_UCODE_ID_CP_MEC:
+fw_version = adev->gfx.mec_fw_version;
+break;
+case CGS_UCODE_ID_CP_MEC_JT1:
+fw_version = adev->gfx.mec_fw_version;
+break;
+case CGS_UCODE_ID_CP_MEC_JT2:
+fw_version = adev->gfx.mec_fw_version;
+break;
+case CGS_UCODE_ID_RLC_G:
+fw_version = adev->gfx.rlc_fw_version;
+break;
+default:
+DRM_ERROR("firmware type %d do not have version\n", type);
+fw_version = 0;
+}
+
+return fw_version;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 enum cgs_ucode_id type,
 struct cgs_firmware_info *info)

@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 info->mc_addr = gpu_addr;
 info->image_size = data_size;
 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
 } else {
 char fw_name[30] = {0};


@@ -1545,7 +1545,8 @@ static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
 return MODE_OK;
 }
 
-int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
 {
 return 0;
 }

@@ -1557,7 +1558,8 @@ amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
 return connector_status_connected;
 }
 
-int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
 struct drm_property *property,
 uint64_t val)
 {


@@ -50,6 +50,7 @@
 #include "vi.h"
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
+#include <linux/firmware.h>
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -651,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
 }
 
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+if (amdgpu_sriov_vf(adev))
+return false;
+
+if (amdgpu_passthrough(adev)) {
+/* For FIJI: in the whole-GPU pass-through virtualization case, old SMC
+ * firmware won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH), so
+ * amdgpu_card_posted() returns false and the driver would incorrectly
+ * skip vPost. But if we always force vPost in the pass-through case,
+ * driver reload will hang. For FIJI with SMC firmware 00160e00 or newer,
+ * whether to do vPost can rely on amdgpu_card_posted().
+ */
+if (adev->asic_type == CHIP_FIJI) {
+int err;
+uint32_t fw_ver;
+err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+/* force vPost if an error occurred */
+if (err)
+return true;
+
+fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+if (fw_ver >= 0x00160e00)
+return !amdgpu_card_posted(adev);
+}
+} else {
+/* In the bare-metal case, amdgpu_card_posted() returns false after a
+ * system boot/reboot and true when the driver is reloaded. We shouldn't
+ * do vPost after a driver reload, otherwise the GPU could hang.
+ */
+if (amdgpu_card_posted(adev))
+return false;
+}
+
+/* we assume vPost is needed for all other cases */
+return true;
+}
+
 /**
  * amdgpu_dummy_page_init - init dummy page used by the driver
  *
@@ -1485,13 +1526,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 return 0;
 }
 
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
-#ifdef CONFIG_X86
-return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
-return false;
-#endif
+if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 }
 
 /**
@@ -1648,25 +1686,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 goto failed;
 }
 
-/* See if the asic supports SR-IOV */
-adev->virtualization.supports_sr_iov =
-amdgpu_atombios_has_gpu_virtualization_table(adev);
-
-/* Check if we are executing in a virtualized environment */
-adev->virtualization.is_virtual = amdgpu_device_is_virtual();
-adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+/* detect if we are with an SRIOV vbios */
+amdgpu_device_detect_sriov_bios(adev);
 
 /* Post card if necessary */
-if (!amdgpu_card_posted(adev) ||
-(adev->virtualization.is_virtual &&
-!(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+if (amdgpu_vpost_needed(adev)) {
 if (!adev->bios) {
-dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+dev_err(adev->dev, "no vBIOS found\n");
 r = -EINVAL;
 goto failed;
 }
-DRM_INFO("GPU not posted. posting now...\n");
-amdgpu_atom_asic_init(adev->mode_info.atom_context);
+DRM_INFO("GPU posting now...\n");
+r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+if (r) {
+dev_err(adev->dev, "gpu post error!\n");
+goto failed;
+}
+} else {
+DRM_INFO("GPU post is not needed\n");
 }
 
 /* Initialize clocks */
@@ -1842,8 +1879,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 adev = dev->dev_private;
 
-if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 return 0;
 
 drm_kms_helper_poll_disable(dev);

@@ -1928,8 +1964,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 struct drm_crtc *crtc;
 int r;
 
-if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 return 0;
 
 if (fbcon)
@@ -2043,7 +2078,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 return asic_hang;
 }
 
-int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 {
 int i, r = 0;

@@ -2714,7 +2749,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 if (size & 0x3 || *pos & 0x3)
 return -EINVAL;
 
-config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
 if (!config)
 return -ENOMEM;
@@ -2773,6 +2808,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 return result;
 }
 
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+size_t size, loff_t *pos)
+{
+struct amdgpu_device *adev = f->f_inode->i_private;
+int idx, r;
+int32_t value;
+
+if (size != 4 || *pos & 0x3)
+return -EINVAL;
+
+/* convert offset to sensor number */
+idx = *pos >> 2;
+
+if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+else
+return -EINVAL;
+
+if (!r)
+r = put_user(value, (int32_t *)buf);
+
+return !r ? 4 : r;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 .owner = THIS_MODULE,

@@ -2805,12 +2863,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
 .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+.owner = THIS_MODULE,
+.read = amdgpu_debugfs_sensor_read,
+.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 &amdgpu_debugfs_regs_fops,
 &amdgpu_debugfs_regs_didt_fops,
 &amdgpu_debugfs_regs_pcie_fops,
 &amdgpu_debugfs_regs_smc_fops,
 &amdgpu_debugfs_gca_config_fops,
+&amdgpu_debugfs_sensors_fops,
 };
 
 static const char *debugfs_regs_names[] = {

@@ -2819,6 +2884,7 @@ static const char *debugfs_regs_names[] = {
 "amdgpu_regs_pcie",
 "amdgpu_regs_smc",
 "amdgpu_gca_config",
+"amdgpu_sensors",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
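The amdgpu_sensors debugfs node added above returns one 32-bit value per read: the read size must be exactly 4 bytes, the file offset must be 4-byte aligned, and the offset divided by 4 selects the sensor index. A hedged userspace sketch (the debugfs path and the sensor index 0 are assumptions for illustration; the node is registered alongside the existing amdgpu_regs* files):

/* Hedged sketch, not part of the patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed location: the per-device DRM debugfs directory */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_sensors";
	int sensor_idx = 0;	/* hypothetical AMDGPU_PP_SENSOR_* index */
	int32_t value;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/* one sensor per 4-byte slot: read exactly 4 bytes at offset idx * 4 */
	if (pread(fd, &value, sizeof(value), (off_t)sensor_idx * 4) == sizeof(value))
		printf("sensor %d = %d\n", sensor_idx, (int)value);
	close(fd);
	return 0;
}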


@@ -56,9 +56,10 @@
  * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
  * - 3.5.0 - Add support for new UVD_NO_OP register.
  * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
  */
 #define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 6
+#define KMS_DRIVER_MINOR 7
 #define KMS_DRIVER_PATCHLEVEL 0
 
 int amdgpu_vram_limit = 0;

@@ -485,7 +486,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 /* if we are running in a VM, make sure the device
  * torn down properly on reboot/shutdown
  */
-if (adev->virtualization.is_virtual)
+if (amdgpu_passthrough(adev))
 amdgpu_pci_remove(pdev);
 }


@@ -1322,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
  */
 #if defined(CONFIG_DEBUG_FS)
 
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+int32_t value;
+
+/* sanity check PP is enabled */
+if (!(adev->powerplay.pp_funcs &&
+adev->powerplay.pp_funcs->read_sensor))
+return -EINVAL;
+
+/* GPU Clocks */
+seq_printf(m, "GFX Clocks and Power:\n");
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+seq_printf(m, "\t%u mV (VDDNB)\n", value);
+seq_printf(m, "\n");
+
+/* GPU Temp */
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+/* GPU Load */
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+seq_printf(m, "GPU Load: %u %%\n", value);
+seq_printf(m, "\n");
+
+/* UVD clocks */
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+if (!value) {
+seq_printf(m, "UVD: Disabled\n");
+} else {
+seq_printf(m, "UVD: Enabled\n");
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+}
+}
+seq_printf(m, "\n");
+
+/* VCE clocks */
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+if (!value) {
+seq_printf(m, "VCE: Disabled\n");
+} else {
+seq_printf(m, "VCE: Enabled\n");
+if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+}
+}
+
+return 0;
+}
+
 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 {
 struct drm_info_node *node = (struct drm_info_node *) m->private;

@@ -1337,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
 seq_printf(m, "PX asic powered off\n");
 } else if (adev->pp_enabled) {
-amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+return amdgpu_debugfs_pm_info_pp(m, adev);
 } else {
 mutex_lock(&adev->pm.mutex);
 if (adev->pm.funcs->debugfs_print_current_performance_level)
-amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
 else
 seq_printf(m, "Debugfs support not implemented for this asic\n");
 mutex_unlock(&adev->pm.mutex);


@@ -80,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
 amd_pp->ip_funcs = &kv_dpm_ip_funcs;
 break;
 #endif
-case CHIP_TOPAZ:
-amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
-break;
-case CHIP_TONGA:
-amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
-break;
-case CHIP_FIJI:
-amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
-break;
 case CHIP_CARRIZO:
 case CHIP_STONEY:
 amd_pp->ip_funcs = &cz_dpm_ip_funcs;

@@ -110,11 +101,11 @@ static int amdgpu_pp_early_init(void *handle)
 switch (adev->asic_type) {
 case CHIP_POLARIS11:
 case CHIP_POLARIS10:
-adev->pp_enabled = true;
-break;
 case CHIP_TONGA:
 case CHIP_FIJI:
 case CHIP_TOPAZ:
+adev->pp_enabled = true;
+break;
 case CHIP_CARRIZO:
 case CHIP_STONEY:
 adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;


@@ -691,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 case 0x04000008: /* rdo */
 case 0x04000009: /* vui */
 case 0x05000002: /* auxiliary buffer */
+case 0x05000009: /* clock table */
 break;
 
 case 0x03000001: /* encode */


@@ -19,22 +19,39 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Author: Monk.liu@amd.com
  */
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
 
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is SR-IOV ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* SR-IOV is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
+
+/* GPU virtualization */
+struct amdgpu_virtualization {
+uint32_t virtual_caps;
+};
 
-#include "polaris10_hwmgr.h"
-#include "pp_asicblocks.h"
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
+
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
+
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+return false;
+#endif
+}
 
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
-const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
+#endif


@@ -963,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
 return true;
 }
 
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
-/* CIK does not support SR-IOV */
-return 0;
-}
-
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 {mmGRBM_STATUS, false},
 {mmGB_ADDR_CONFIG, false},

@@ -1641,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
 >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+if (is_virtual_machine()) /* passthrough mode */
+adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
 static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
 {
 /* ORDER MATTERS! */

@@ -2384,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 .read_disabled_bios = &cik_read_disabled_bios,
 .read_bios_from_rom = &cik_read_bios_from_rom,
+.detect_hw_virtualization = cik_detect_hw_virtualization,
 .read_register = &cik_read_register,
 .reset = &cik_asic_reset,
 .set_vga_state = &cik_vga_set_state,
 .get_xclk = &cik_get_xclk,
 .set_uvd_clocks = &cik_set_uvd_clocks,
 .set_vce_clocks = &cik_set_vce_clocks,
-.get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)


@@ -562,4 +562,40 @@ enum {
 MTYPE_NONCACHED = 3
 };
 
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
 #endif
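The PA_SC_RASTER_CONFIG field helpers added above are plain shift/mask macros; a small sketch of composing a register value with them (the chosen field values are arbitrary examples, not taken from any ASIC table):

/* Hedged sketch, not part of the patch: assumes cikd.h is available in the
 * usual amdgpu kernel build context. */
static u32 example_raster_config(void)
{
	u32 raster_config = 0;

	raster_config |= RB_MAP_PKR0(2);			/* RB map for packer 0 */
	raster_config |= PKR_XSEL(1);				/* packer select, X direction */
	raster_config |= SE_MAP(1) | SE_XSEL(1) | SE_YSEL(1);	/* shader-engine mapping */

	return raster_config;
}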


@@ -101,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
 return 0;
 }
 
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
-u16 msg, u32 parameter)
-{
-WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
-return cz_send_msg_to_smc_async(adev, msg);
-}
-
 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
 u16 msg, u32 parameter)
 {


@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-unsigned i = 0;
+unsigned i = 100;
 
 if (crtc >= adev->mode_info.num_crtc)
 return;

@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 * wait for another frame.
 */
 while (dce_v10_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v10_0_is_counter_moving(adev, crtc))
 break;
 }
 }
 
 while (!dce_v10_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v10_0_is_counter_moving(adev, crtc))
 break;
 }


@@ -146,7 +146,7 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-unsigned i = 0;
+unsigned i = 100;
 
 if (crtc >= adev->mode_info.num_crtc)
 return;

@@ -158,14 +158,16 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 * wait for another frame.
 */
 while (dce_v6_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v6_0_is_counter_moving(adev, crtc))
 break;
 }
 }
 
 while (!dce_v6_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v6_0_is_counter_moving(adev, crtc))
 break;
 }

@@ -185,7 +187,7 @@ static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
 unsigned i;
 
 /* Enable pflip interrupts */
-for (i = 0; i <= adev->mode_info.num_crtc; i++)
+for (i = 0; i < adev->mode_info.num_crtc; i++)
 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
 }

@@ -194,7 +196,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
 unsigned i;
 
 /* Disable pflip interrupts */
-for (i = 0; i <= adev->mode_info.num_crtc; i++)
+for (i = 0; i < adev->mode_info.num_crtc; i++)
 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
 }

@@ -1420,21 +1422,29 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
 }
 
-static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
 {
-int i;
+int i, j;
 
 for (i = 0; i < adev->mode_info.num_dig; i++)
 adev->mode_info.afmt[i] = NULL;
 
-/* DCE8 has audio blocks tied to DIG encoders */
+/* DCE6 has audio blocks tied to DIG encoders */
 for (i = 0; i < adev->mode_info.num_dig; i++) {
 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
 if (adev->mode_info.afmt[i]) {
 adev->mode_info.afmt[i]->offset = dig_offsets[i];
 adev->mode_info.afmt[i]->id = i;
+} else {
+for (j = 0; j < i; j++) {
+kfree(adev->mode_info.afmt[j]);
+adev->mode_info.afmt[j] = NULL;
+}
+DRM_ERROR("Out of memory allocating afmt table\n");
+return -ENOMEM;
 }
 }
+
+return 0;
 }
 
 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)

@@ -2397,7 +2407,9 @@ static int dce_v6_0_sw_init(void *handle)
 return -EINVAL;
 
 /* setup afmt */
-dce_v6_0_afmt_init(adev);
+r = dce_v6_0_afmt_init(adev);
+if (r)
+return r;
 
 r = dce_v6_0_audio_init(adev);
 if (r)

@@ -2782,7 +2794,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 uint32_t disp_int, mask, int_control, tmp;
 unsigned hpd;
 
-if (entry->src_data > 6) {
+if (entry->src_data >= adev->mode_info.num_hpd) {
 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
 return 0;
 }


@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
  */
 static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 {
-unsigned i = 0;
+unsigned i = 100;
 
 if (crtc >= adev->mode_info.num_crtc)
 return;

@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 * wait for another frame.
 */
 while (dce_v8_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v8_0_is_counter_moving(adev, crtc))
 break;
 }
 }
 
 while (!dce_v8_0_is_in_vblank(adev, crtc)) {
-if (i++ % 100 == 0) {
+if (i++ == 100) {
+i = 0;
 if (!dce_v8_0_is_counter_moving(adev, crtc))
 break;
 }


@@ -95,7 +95,7 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
 return false;
 }
 
-void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 struct amdgpu_mode_mc_save *save)
 {
 switch (adev->asic_type) {

@@ -127,13 +127,13 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
 return;
 }
 
-void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
 struct amdgpu_mode_mc_save *save)
 {
 return;
 }
 
-void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
 bool render)
 {
 return;


@@ -1,186 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "fiji_smum.h"
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
static int fiji_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
fiji_dpm_set_funcs(adev);
return 0;
}
static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30] = "amdgpu/fiji_smc.bin";
int err;
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->pm.fw);
out:
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
}
return err;
}
static int fiji_dpm_sw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ret = fiji_dpm_init_microcode(adev);
if (ret)
return ret;
return 0;
}
static int fiji_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}
static int fiji_dpm_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
ret = fiji_smu_init(adev);
if (ret) {
DRM_ERROR("SMU initialization failed\n");
goto fail;
}
ret = fiji_smu_start(adev);
if (ret) {
DRM_ERROR("SMU start failed\n");
goto fail;
}
mutex_unlock(&adev->pm.mutex);
return 0;
fail:
adev->firmware.smu_load = false;
mutex_unlock(&adev->pm.mutex);
return -EINVAL;
}
static int fiji_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
fiji_smu_fini(adev);
mutex_unlock(&adev->pm.mutex);
return 0;
}
static int fiji_dpm_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
fiji_dpm_hw_fini(adev);
return 0;
}
static int fiji_dpm_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
fiji_dpm_hw_init(adev);
return 0;
}
static int fiji_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int fiji_dpm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.name = "fiji_dpm",
.early_init = fiji_dpm_early_init,
.late_init = NULL,
.sw_init = fiji_dpm_sw_init,
.sw_fini = fiji_dpm_sw_fini,
.hw_init = fiji_dpm_hw_init,
.hw_fini = fiji_dpm_hw_fini,
.suspend = fiji_dpm_suspend,
.resume = fiji_dpm_resume,
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.set_clockgating_state = fiji_dpm_set_clockgating_state,
.set_powergating_state = fiji_dpm_set_powergating_state,
};
static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
.get_temperature = NULL,
.pre_set_power_state = NULL,
.set_power_state = NULL,
.post_set_power_state = NULL,
.display_configuration_changed = NULL,
.get_sclk = NULL,
.get_mclk = NULL,
.print_power_state = NULL,
.debugfs_print_current_performance_level = NULL,
.force_performance_level = NULL,
.vblank_too_short = NULL,
.powergate_uvd = NULL,
};
static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
{
if (NULL == adev->pm.funcs)
adev->pm.funcs = &fiji_dpm_funcs;
}


@@ -1,863 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "fiji_ppsmc.h"
#include "fiji_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#define FIJI_SMC_SIZE 0x20000
static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
{
uint32_t val;
if (smc_address & 3)
return -EINVAL;
if ((smc_address + 3) > limit)
return -EINVAL;
WREG32(mmSMC_IND_INDEX_0, smc_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
return 0;
}
static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
uint32_t addr;
uint32_t data, orig_data;
int result = 0;
uint32_t extra_shift;
unsigned long flags;
if (smc_start_address & 3)
return -EINVAL;
if ((smc_start_address + byte_count) > limit)
return -EINVAL;
addr = smc_start_address;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* Bytes are written into the SMC addres space with the MSB first */
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
result = fiji_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
addr += 4;
}
if (0 != byte_count) {
/* Now write odd bytes left, do a read modify write cycle */
data = 0;
result = fiji_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
orig_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
data = (data << 8) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (orig_data & ~((~0UL) << extra_shift));
result = fiji_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
}
out:
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int fiji_program_jump_on_start(struct amdgpu_device *adev)
{
static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
return 0;
}
static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}
static int wait_smu_response(struct amdgpu_device *adev)
{
int i;
uint32_t val;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(mmSMC_RESP_0);
if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, 0x20000);
WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send message\n");
return -EINVAL;
}
return 0;
}
static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!fiji_is_smc_ram_running(adev))
{
return -EINVAL;
}
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send message\n");
return -EINVAL;
}
return 0;
}
static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
return 0;
}
static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg,
uint32_t parameter)
{
if (!fiji_is_smc_ram_running(adev))
return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, parameter);
return fiji_send_msg_to_smc(adev, msg);
}
static int fiji_send_msg_to_smc_with_parameter_without_waiting(
struct amdgpu_device *adev,
PPSMC_Msg msg, uint32_t parameter)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, parameter);
return fiji_send_msg_to_smc_without_waiting(adev, msg);
}
#if 0 /* not used yet */
static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
{
int i;
uint32_t val;
if (!fiji_is_smc_ram_running(adev))
return -EINVAL;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
#endif
static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
{
const struct smc_firmware_header_v1_0 *hdr;
uint32_t ucode_size;
uint32_t ucode_start_address;
const uint8_t *src;
uint32_t val;
uint32_t byte_count;
uint32_t *data;
unsigned long flags;
if (!adev->pm.fw)
return -EINVAL;
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
if (adev->virtualization.supports_sr_iov)
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
src = (const uint8_t *)
(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
if (ucode_size & 3) {
DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
return -EINVAL;
}
if (ucode_size > FIJI_SMC_SIZE) {
DRM_ERROR("SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
byte_count = ucode_size;
data = (uint32_t *)src;
for (; byte_count >= 4; data++, byte_count -= 4)
WREG32(mmSMC_IND_DATA_0, data[0]);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
}
#if 0 /* not used yet */
static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t *value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = fiji_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
*value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = fiji_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int fiji_smu_stop_smc(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
return 0;
}
#endif
static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case UCODE_ID_SDMA0:
return AMDGPU_UCODE_ID_SDMA0;
case UCODE_ID_SDMA1:
return AMDGPU_UCODE_ID_SDMA1;
case UCODE_ID_CP_CE:
return AMDGPU_UCODE_ID_CP_CE;
case UCODE_ID_CP_PFP:
return AMDGPU_UCODE_ID_CP_PFP;
case UCODE_ID_CP_ME:
return AMDGPU_UCODE_ID_CP_ME;
case UCODE_ID_CP_MEC:
case UCODE_ID_CP_MEC_JT1:
case UCODE_ID_CP_MEC_JT2:
return AMDGPU_UCODE_ID_CP_MEC1;
case UCODE_ID_RLC_G:
return AMDGPU_UCODE_ID_RLC_G;
default:
DRM_ERROR("ucode type is out of range!\n");
return AMDGPU_UCODE_ID_MAXIMUM;
}
}
static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
uint32_t fw_type,
struct SMU_Entry *entry)
{
enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
const struct gfx_firmware_header_v1_0 *header = NULL;
uint64_t gpu_addr;
uint32_t data_size;
if (ucode->fw == NULL)
return -EINVAL;
gpu_addr = ucode->mc_addr;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
data_size = le32_to_cpu(header->header.ucode_size_bytes);
if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
(fw_type == UCODE_ID_CP_MEC_JT2)) {
gpu_addr += le32_to_cpu(header->jt_offset) << 2;
data_size = le32_to_cpu(header->jt_size) << 2;
}
entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
entry->id = (uint16_t)fw_type;
entry->image_addr_high = upper_32_bits(gpu_addr);
entry->image_addr_low = lower_32_bits(gpu_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = data_size;
entry->num_register_entries = 0;
if (fw_type == UCODE_ID_RLC_G)
entry->flags = 1;
else
entry->flags = 0;
return 0;
}
static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
{
struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
struct SMU_DRAMData_TOC *toc;
uint32_t fw_to_load;
WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
toc = (struct SMU_DRAMData_TOC *)private->header;
toc->num_entries = 0;
toc->structure_version = 1;
if (!adev->firmware.smu_load)
return 0;
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for RLC\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for CE\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for PFP\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for ME\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
return -EINVAL;
}
if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA1\n");
return -EINVAL;
}
fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
UCODE_ID_SDMA1_MASK |
UCODE_ID_CP_CE_MASK |
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK;
if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
return -EINVAL;
}
return 0;
}
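The nine back-to-back populate calls above differ only in the UCODE_ID_* value and the error string, so the same TOC could in principle be filled from a small table. A sketch of that shape, reusing only the fiji_smu_populate_single_firmware_entry() helper shown above (fiji_toc_ids and fiji_smu_populate_toc are made-up names, not the driver's code):
static const uint32_t fiji_toc_ids[] = {
	UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP, UCODE_ID_CP_ME,
	UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1, UCODE_ID_CP_MEC_JT2,
	UCODE_ID_SDMA0, UCODE_ID_SDMA1,
};
/* Sketch only: fill the TOC from the table instead of one call per firmware. */
static int fiji_smu_populate_toc(struct amdgpu_device *adev,
				 struct SMU_DRAMData_TOC *toc)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(fiji_toc_ids); i++) {
		if (fiji_smu_populate_single_firmware_entry(adev, fiji_toc_ids[i],
				&toc->entry[toc->num_entries++])) {
			DRM_ERROR("Failed to get firmware entry %u\n", fiji_toc_ids[i]);
			return -EINVAL;
		}
	}
	return 0;
}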
static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case AMDGPU_UCODE_ID_SDMA0:
return UCODE_ID_SDMA0_MASK;
case AMDGPU_UCODE_ID_SDMA1:
return UCODE_ID_SDMA1_MASK;
case AMDGPU_UCODE_ID_CP_CE:
return UCODE_ID_CP_CE_MASK;
case AMDGPU_UCODE_ID_CP_PFP:
return UCODE_ID_CP_PFP_MASK;
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
return UCODE_ID_RLC_G_MASK;
default:
DRM_ERROR("ucode type is out of range!\n");
return 0;
}
}
static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
uint32_t fw_type)
{
uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
int i;
for (i = 0; i < adev->usec_timeout; i++) {
if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("check firmware loading failed\n");
return -EINVAL;
}
return 0;
}
static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
{
int result;
uint32_t val;
int i;
/* Assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
result = fiji_smu_upload_firmware_image(adev);
if (result)
return result;
/* Clear status */
WREG32_SMC(ixSMU_STATUS, 0);
/* Enable clock */
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
/* De-assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
/* Set SMU Auto Start */
val = RREG32_SMC(ixSMU_INPUT_DATA);
val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
WREG32_SMC(ixSMU_INPUT_DATA, val);
/* Clear firmware interrupt enable flag */
WREG32_SMC(ixFIRMWARE_FLAGS, 0);
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixRCU_UC_EVENTS);
if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Interrupt is not enabled by firmware\n");
return -EINVAL;
}
/* Call Test SMU message with 0x20000 offset
* to trigger SMU start
*/
fiji_send_msg_to_smc_offset(adev);
DRM_INFO("[FM]try triger smu start\n");
/* Wait for done bit to be set */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixSMU_STATUS);
if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Timeout for SMU start\n");
return -EINVAL;
}
/* Check pass/failed indicator */
val = RREG32_SMC(ixSMU_STATUS);
if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
DRM_ERROR("SMU Firmware start failed\n");
return -EINVAL;
}
DRM_INFO("[FM]smu started\n");
/* Wait for firmware to initialize */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixFIRMWARE_FLAGS);
if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("SMU firmware initialization failed\n");
return -EINVAL;
}
DRM_INFO("[FM]smu initialized\n");
return 0;
}
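The protection-mode start sequence above repeats the same poll loop four times: read an SMC register, test one field, udelay(1), give up after adev->usec_timeout iterations. A generic helper with that shape might look like the sketch below (hypothetical name, mask-based rather than REG_GET_FIELD-based):
/* Sketch of the polling pattern used above: returns 0 once any bit under
 * 'mask' in the indirect SMC register is set, -ETIMEDOUT otherwise. */
static int fiji_wait_smc_bits(struct amdgpu_device *adev, u32 ix_reg, u32 mask)
{
	int i;
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ix_reg) & mask)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}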
static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
{
int i, result;
uint32_t val;
/* wait for smc boot up */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixRCU_UC_EVENTS);
val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
if (val)
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("SMC boot sequence is not completed\n");
return -EINVAL;
}
/* Clear firmware interrupt enable flag */
WREG32_SMC(ixFIRMWARE_FLAGS, 0);
/* Assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
result = fiji_smu_upload_firmware_image(adev);
if (result)
return result;
/* Set SMC instruction start point at 0x0 */
fiji_program_jump_on_start(adev);
/* Enable clock */
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
/* De-assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
/* Wait for firmware to initialize */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixFIRMWARE_FLAGS);
if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Timeout for SMC firmware initialization\n");
return -EINVAL;
}
return 0;
}
int fiji_smu_start(struct amdgpu_device *adev)
{
int result;
uint32_t val;
if (!fiji_is_smc_ram_running(adev)) {
val = RREG32_SMC(ixSMU_FIRMWARE);
if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
DRM_INFO("[FM]start smu in nonprotection mode\n");
result = fiji_smu_start_in_non_protection_mode(adev);
if (result)
return result;
} else {
DRM_INFO("[FM]start smu in protection mode\n");
result = fiji_smu_start_in_protection_mode(adev);
if (result)
return result;
}
}
return fiji_smu_request_load_fw(adev);
}
static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
.check_fw_load_finish = fiji_smu_check_fw_load_finish,
.request_smu_load_fw = NULL,
.request_smu_specific_fw = NULL,
};
int fiji_smu_init(struct amdgpu_device *adev)
{
struct fiji_smu_private_data *private;
uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
uint32_t smu_internal_buffer_size = 200*4096;
struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
uint64_t mc_addr;
void *toc_buf_ptr;
void *smu_buf_ptr;
int ret;
private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
if (NULL == private)
return -ENOMEM;
/* allocate firmware buffers */
if (adev->firmware.smu_load)
amdgpu_ucode_init_bo(adev);
adev->smu.priv = private;
adev->smu.fw_flags = 0;
/* Allocate FW image data structure and header buffer */
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
}
/* Allocate buffer for SMU internal buffer */
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
}
/* Retrieve GPU address for header buffer and internal buffer */
ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
if (ret) {
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to reserve the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to pin the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to map the TOC buffer\n");
return -EINVAL;
}
amdgpu_bo_unreserve(adev->smu.toc_buf);
private->header_addr_low = lower_32_bits(mc_addr);
private->header_addr_high = upper_32_bits(mc_addr);
private->header = toc_buf_ptr;
ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
if (ret) {
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to reserve the SMU internal buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to pin the SMU internal buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to map the SMU internal buffer\n");
return -EINVAL;
}
amdgpu_bo_unreserve(adev->smu.smu_buf);
private->smu_buffer_addr_low = lower_32_bits(mc_addr);
private->smu_buffer_addr_high = upper_32_bits(mc_addr);
adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
return 0;
}
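Both the TOC buffer and the SMU internal buffer go through the same create/reserve/pin/kmap/unreserve sequence, each with its own unwind on error. A condensed sketch of that sequence as a single helper, built only from the buffer-object calls used above (hypothetical helper; like the original, the pin is left to be dropped with the final unref):
/* Sketch: allocate a pinned, CPU-mapped VRAM buffer and return its GPU
 * address and CPU pointer, mirroring the unwind used above on failure. */
static int fiji_smu_alloc_pinned(struct amdgpu_device *adev, unsigned long size,
				 struct amdgpu_bo **bo, u64 *mc_addr, void **cpu_ptr)
{
	int r;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, NULL, NULL, bo);
	if (r)
		return r;
	r = amdgpu_bo_reserve(*bo, false);
	if (r)
		goto err_unref;
	r = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_VRAM, mc_addr);
	if (r)
		goto err_unreserve;
	r = amdgpu_bo_kmap(*bo, cpu_ptr);
	if (r)
		goto err_unreserve;
	amdgpu_bo_unreserve(*bo);
	return 0;
err_unreserve:
	amdgpu_bo_unreserve(*bo);
err_unref:
	amdgpu_bo_unref(bo);
	return r;
}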
int fiji_smu_fini(struct amdgpu_device *adev)
{
amdgpu_bo_unref(&adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
kfree(adev->smu.priv);
adev->smu.priv = NULL;
if (adev->firmware.fw_buf)
amdgpu_ucode_fini_bo(adev);
return 0;
}

View File

@ -931,6 +931,123 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
return data & mask; return data & mask;
} }
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
{
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
*rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
break;
case CHIP_VERDE:
*rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
break;
case CHIP_OLAND:
*rconf |= RB_YSEL;
break;
case CHIP_HAINAN:
*rconf |= 0x0;
break;
default:
DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
break;
}
}
static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
u32 raster_config, unsigned rb_mask,
unsigned num_rb)
{
unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
unsigned rb_per_se = num_rb / num_se;
unsigned se_mask[4];
unsigned se;
se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
for (se = 0; se < num_se; se++) {
unsigned raster_config_se = raster_config;
unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
raster_config_se &= ~SE_MAP_MASK;
if (!se_mask[idx]) {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
} else {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
raster_config_se &= ~PKR_MAP_MASK;
if (!pkr0_mask) {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
} else {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
}
}
if (rb_per_se >= 2) {
unsigned rb0_mask = 1 << (se * rb_per_se);
unsigned rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
}
}
if (rb_per_se > 2) {
rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
}
}
}
}
/* GRBM_GFX_INDEX has a different offset on SI */
gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
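The se_mask[] setup at the top of the function simply slices the global RB mask into per-shader-engine windows of rb_per_se bits. For example, with 8 RBs across 2 SEs and RB 5 harvested (rb_mask = 0xDF), rb_per_se is 4, se_mask[0] becomes 0x0F and se_mask[1] becomes 0xD0, so only SE1 takes the harvested path. A tiny standalone check of that arithmetic (illustration only, not driver code):
#include <assert.h>
/* Worked example of the se_mask[] slicing: 8 RBs across 2 SEs, RB 5
 * harvested (rb_mask = 0xDF), so each SE owns a 4-bit window. */
static void raster_mask_example(void)
{
	unsigned rb_mask = 0xDF, rb_per_se = 4;
	unsigned se_mask[2];
	se_mask[0] = ((1u << rb_per_se) - 1) & rb_mask;    /* 0x0F: SE0 fully enabled */
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;  /* 0xD0: SE1 lost one RB   */
	assert(se_mask[0] == 0x0F && se_mask[1] == 0xD0);
}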
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num_per_se)
@ -939,6 +1056,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
u32 data, mask;
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
@ -961,6 +1079,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
adev->gfx.config.backend_enable_mask = enabled_rbs;
adev->gfx.config.num_rbs = hweight32(enabled_rbs);
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
@ -980,7 +1101,15 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
}
enabled_rbs >>= 2;
}
WREG32(PA_SC_RASTER_CONFIG, data);
gfx_v6_0_raster_config(adev, &data);
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes)
WREG32(PA_SC_RASTER_CONFIG, data);
else
gfx_v6_0_write_harvested_raster_configs(adev, data,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);

View File

@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask; return (~data) & mask;
} }
static void
gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
break;
case CHIP_HAWAII:
*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
SE_YSEL(3);
*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
SE_PAIR_YSEL(2);
break;
case CHIP_KAVERI:
*rconf |= RB_MAP_PKR0(2);
*rconf1 |= 0x0;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
*rconf |= 0x0;
*rconf1 |= 0x0;
break;
default:
DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
break;
}
}
static void
gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
u32 raster_config, u32 raster_config_1,
unsigned rb_mask, unsigned num_rb)
{
unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
unsigned rb_per_se = num_rb / num_se;
unsigned se_mask[4];
unsigned se;
se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
(!se_mask[2] && !se_mask[3]))) {
raster_config_1 &= ~SE_PAIR_MAP_MASK;
if (!se_mask[0] && !se_mask[1]) {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
} else {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
}
}
for (se = 0; se < num_se; se++) {
unsigned raster_config_se = raster_config;
unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
raster_config_se &= ~SE_MAP_MASK;
if (!se_mask[idx]) {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
} else {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
raster_config_se &= ~PKR_MAP_MASK;
if (!pkr0_mask) {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
} else {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
}
}
if (rb_per_se >= 2) {
unsigned rb0_mask = 1 << (se * rb_per_se);
unsigned rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
}
}
if (rb_per_se > 2) {
rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
}
}
}
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
/**
* gfx_v7_0_setup_rb - setup the RBs on the asic
*
@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);
gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes) {
WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
} else {
gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
mutex_unlock(&adev->grbm_idx_mutex);
}
/**

View File

@ -3492,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask; return (~data) & mask;
} }
static void
gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
switch (adev->asic_type) {
case CHIP_FIJI:
*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
RB_XSEL2(1) | PKR_MAP(2) |
PKR_XSEL(1) | PKR_YSEL(1) |
SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
SE_PAIR_YSEL(2);
break;
case CHIP_TONGA:
case CHIP_POLARIS10:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
SE_PAIR_YSEL(2);
break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
*rconf |= RB_MAP_PKR0(2);
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
break;
case CHIP_STONEY:
*rconf |= 0x0;
*rconf1 |= 0x0;
break;
default:
DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
break;
}
}
static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
u32 raster_config, u32 raster_config_1,
unsigned rb_mask, unsigned num_rb)
{
unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
unsigned rb_per_se = num_rb / num_se;
unsigned se_mask[4];
unsigned se;
se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
(!se_mask[2] && !se_mask[3]))) {
raster_config_1 &= ~SE_PAIR_MAP_MASK;
if (!se_mask[0] && !se_mask[1]) {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
} else {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
}
}
for (se = 0; se < num_se; se++) {
unsigned raster_config_se = raster_config;
unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
raster_config_se &= ~SE_MAP_MASK;
if (!se_mask[idx]) {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
} else {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
raster_config_se &= ~PKR_MAP_MASK;
if (!pkr0_mask) {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
} else {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
}
}
if (rb_per_se >= 2) {
unsigned rb0_mask = 1 << (se * rb_per_se);
unsigned rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
}
}
if (rb_per_se > 2) {
rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
}
}
}
}
/* GRBM_GFX_INDEX has a different offset on VI */
gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on VI */
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@ -3510,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);
gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes) {
WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
} else {
gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@ -5817,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
uint32_t msg_id, pp_state;
void *pp_handle = adev->powerplay.pp_handle;
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_CG | PP_STATE_LS;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
return 0;
}
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
uint32_t msg_id, pp_state;
void *pp_handle = adev->powerplay.pp_handle;
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_CG | PP_STATE_LS;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_3D,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_RLC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
return 0;
}
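Each block above is the same PP_CG_MSG_ID()/amd_set_clockgating_by_smu() pair with a different PP_BLOCK_GFX_* id, so the sequence could also be driven from a table. A sketch of that alternative, using only the macros and the helper introduced by this change (not the committed code; polaris_cg_by_smu is a made-up name):
/* Sketch only: send the same CG/LS request for each GFX sub-block. */
static void polaris_cg_by_smu(struct amdgpu_device *adev, uint32_t pp_state)
{
	static const uint32_t blocks[] = {
		PP_BLOCK_GFX_CG, PP_BLOCK_GFX_3D, PP_BLOCK_GFX_MG,
		PP_BLOCK_GFX_RLC, PP_BLOCK_GFX_CP,
	};
	void *pp_handle = adev->powerplay.pp_handle;
	int i;
	for (i = 0; i < ARRAY_SIZE(blocks); i++) {
		uint32_t msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, blocks[i],
				PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
				pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}
}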
static int gfx_v8_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@ -5829,6 +6065,13 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
gfx_v8_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_TONGA:
gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
break;
case CHIP_POLARIS10:
case CHIP_POLARIS11:
gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
break;
default:
break;
}

View File

@ -269,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
/* Skip MC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
* Skip MC ucode loading on VF, because hypervisor will do that
* for this adaptor.
*/
if (adev->virtualization.supports_sr_iov)
if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

View File

@ -1,200 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "iceland_smum.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
static int iceland_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
iceland_dpm_set_funcs(adev);
return 0;
}
static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30] = "amdgpu/topaz_smc.bin";
int err;
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->pm.fw);
out:
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
}
return err;
}
static int iceland_dpm_sw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ret = iceland_dpm_init_microcode(adev);
if (ret)
return ret;
return 0;
}
static int iceland_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}
static int iceland_dpm_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
/* smu init only needs to be called at startup, not resume.
* It should be in sw_init, but requires the fw info gathered
* in sw_init from other IP modules.
*/
ret = iceland_smu_init(adev);
if (ret) {
DRM_ERROR("SMU initialization failed\n");
goto fail;
}
ret = iceland_smu_start(adev);
if (ret) {
DRM_ERROR("SMU start failed\n");
goto fail;
}
mutex_unlock(&adev->pm.mutex);
return 0;
fail:
adev->firmware.smu_load = false;
mutex_unlock(&adev->pm.mutex);
return -EINVAL;
}
static int iceland_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
/* smu fini only needs to be called at teardown, not suspend.
* It should be in sw_fini, but we put it here for symmetry
* with smu init.
*/
iceland_smu_fini(adev);
mutex_unlock(&adev->pm.mutex);
return 0;
}
static int iceland_dpm_suspend(void *handle)
{
return 0;
}
static int iceland_dpm_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
ret = iceland_smu_start(adev);
if (ret) {
DRM_ERROR("SMU start failed\n");
goto fail;
}
fail:
mutex_unlock(&adev->pm.mutex);
return ret;
}
static int iceland_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int iceland_dpm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.name = "iceland_dpm",
.early_init = iceland_dpm_early_init,
.late_init = NULL,
.sw_init = iceland_dpm_sw_init,
.sw_fini = iceland_dpm_sw_fini,
.hw_init = iceland_dpm_hw_init,
.hw_fini = iceland_dpm_hw_fini,
.suspend = iceland_dpm_suspend,
.resume = iceland_dpm_resume,
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.set_clockgating_state = iceland_dpm_set_clockgating_state,
.set_powergating_state = iceland_dpm_set_powergating_state,
};
static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
.get_temperature = NULL,
.pre_set_power_state = NULL,
.set_power_state = NULL,
.post_set_power_state = NULL,
.display_configuration_changed = NULL,
.get_sclk = NULL,
.get_mclk = NULL,
.print_power_state = NULL,
.debugfs_print_current_performance_level = NULL,
.force_performance_level = NULL,
.vblank_too_short = NULL,
.powergate_uvd = NULL,
};
static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
{
if (NULL == adev->pm.funcs)
adev->pm.funcs = &iceland_dpm_funcs;
}

View File

@ -1,677 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
#include "iceland_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"
#define ICELAND_SMC_SIZE 0x20000
static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
uint32_t smc_address, uint32_t limit)
{
uint32_t val;
if (smc_address & 3)
return -EINVAL;
if ((smc_address + 3) > limit)
return -EINVAL;
WREG32(mmSMC_IND_INDEX_0, smc_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
return 0;
}
static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
uint32_t smc_start_address,
const uint8_t *src,
uint32_t byte_count, uint32_t limit)
{
uint32_t addr;
uint32_t data, orig_data;
int result = 0;
uint32_t extra_shift;
unsigned long flags;
if (smc_start_address & 3)
return -EINVAL;
if ((smc_start_address + byte_count) > limit)
return -EINVAL;
addr = smc_start_address;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* Bytes are written into the SMC address space with the MSB first */
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
result = iceland_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
addr += 4;
}
if (0 != byte_count) {
/* Now write odd bytes left, do a read modify write cycle */
data = 0;
result = iceland_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
orig_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
data = (data << 8) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (orig_data & ~((~0UL) << extra_shift));
result = iceland_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
}
out:
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
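The tail handling packs the leftover bytes MSB-first into the high end of the dword and keeps the untouched low bytes read back from the SMC: for two trailing bytes extra_shift is 16, so src[0] lands in bits 31..24, src[1] in bits 23..16, and the low 16 bits come from orig_data. A standalone illustration of that merge (not driver code):
#include <stdint.h>
#include <assert.h>
/* Worked example of the read-modify-write tail above, for 2 leftover bytes. */
static void smc_tail_merge_example(void)
{
	const uint8_t src[2] = { 0xAA, 0xBB };
	uint32_t orig_data = 0x11223344;       /* existing SMC dword */
	uint32_t extra_shift = 8 * (4 - 2);    /* 16 */
	uint32_t data = ((uint32_t)src[0] << 8) | src[1];
	data <<= extra_shift;
	data |= orig_data & ~((~0UL) << extra_shift);
	assert(data == 0xAABB3344);            /* new bytes high, old bytes low */
}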
static void iceland_start_smc(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}
static void iceland_reset_smc(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}
static int iceland_program_jump_on_start(struct amdgpu_device *adev)
{
static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
return 0;
}
static void iceland_stop_smc_clock(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}
static void iceland_start_smc_clock(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}
static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}
static int wait_smu_response(struct amdgpu_device *adev)
{
int i;
uint32_t val;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(mmSMC_RESP_0);
if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!iceland_is_smc_ram_running(adev))
return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send message\n");
return -EINVAL;
}
return 0;
}
static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (!iceland_is_smc_ram_running(adev))
return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
return 0;
}
static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg,
uint32_t parameter)
{
WREG32(mmSMC_MSG_ARG_0, parameter);
return iceland_send_msg_to_smc(adev, msg);
}
static int iceland_send_msg_to_smc_with_parameter_without_waiting(
struct amdgpu_device *adev,
PPSMC_Msg msg, uint32_t parameter)
{
WREG32(mmSMC_MSG_ARG_0, parameter);
return iceland_send_msg_to_smc_without_waiting(adev, msg);
}
#if 0 /* not used yet */
static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
{
int i;
uint32_t val;
if (!iceland_is_smc_ram_running(adev))
return -EINVAL;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
#endif
static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
{
const struct smc_firmware_header_v1_0 *hdr;
uint32_t ucode_size;
uint32_t ucode_start_address;
const uint8_t *src;
uint32_t val;
uint32_t byte_count;
uint32_t data;
unsigned long flags;
int i;
if (!adev->pm.fw)
return -EINVAL;
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
if (adev->virtualization.supports_sr_iov)
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
src = (const uint8_t *)
(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
if (ucode_size & 3) {
DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
return -EINVAL;
}
if (ucode_size > ICELAND_SMC_SIZE) {
DRM_ERROR("SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixRCU_UC_EVENTS);
if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
break;
udelay(1);
}
val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
iceland_stop_smc_clock(adev);
iceland_reset_smc(adev);
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
byte_count = ucode_size;
while (byte_count >= 4) {
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
}
#if 0 /* not used yet */
static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t *value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = iceland_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
*value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = iceland_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int iceland_smu_stop_smc(struct amdgpu_device *adev)
{
iceland_reset_smc(adev);
iceland_stop_smc_clock(adev);
return 0;
}
#endif
static int iceland_smu_start_smc(struct amdgpu_device *adev)
{
int i;
uint32_t val;
iceland_program_jump_on_start(adev);
iceland_start_smc_clock(adev);
iceland_start_smc(adev);
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixFIRMWARE_FLAGS);
if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
break;
udelay(1);
}
return 0;
}
static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case UCODE_ID_SDMA0:
return AMDGPU_UCODE_ID_SDMA0;
case UCODE_ID_SDMA1:
return AMDGPU_UCODE_ID_SDMA1;
case UCODE_ID_CP_CE:
return AMDGPU_UCODE_ID_CP_CE;
case UCODE_ID_CP_PFP:
return AMDGPU_UCODE_ID_CP_PFP;
case UCODE_ID_CP_ME:
return AMDGPU_UCODE_ID_CP_ME;
case UCODE_ID_CP_MEC:
case UCODE_ID_CP_MEC_JT1:
return AMDGPU_UCODE_ID_CP_MEC1;
case UCODE_ID_CP_MEC_JT2:
return AMDGPU_UCODE_ID_CP_MEC2;
case UCODE_ID_RLC_G:
return AMDGPU_UCODE_ID_RLC_G;
default:
DRM_ERROR("ucode type is out of range!\n");
return AMDGPU_UCODE_ID_MAXIMUM;
}
}
static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case AMDGPU_UCODE_ID_SDMA0:
return UCODE_ID_SDMA0_MASK;
case AMDGPU_UCODE_ID_SDMA1:
return UCODE_ID_SDMA1_MASK;
case AMDGPU_UCODE_ID_CP_CE:
return UCODE_ID_CP_CE_MASK;
case AMDGPU_UCODE_ID_CP_PFP:
return UCODE_ID_CP_PFP_MASK;
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
return UCODE_ID_RLC_G_MASK;
default:
DRM_ERROR("ucode type is out of range!\n");
return 0;
}
}
static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
uint32_t fw_type,
struct SMU_Entry *entry)
{
enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
const struct gfx_firmware_header_v1_0 *header = NULL;
uint64_t gpu_addr;
uint32_t data_size;
if (ucode->fw == NULL)
return -EINVAL;
gpu_addr = ucode->mc_addr;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
data_size = le32_to_cpu(header->header.ucode_size_bytes);
entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
entry->id = (uint16_t)fw_type;
entry->image_addr_high = upper_32_bits(gpu_addr);
entry->image_addr_low = lower_32_bits(gpu_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = data_size;
entry->num_register_entries = 0;
entry->flags = 0;
return 0;
}
static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
{
struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
struct SMU_DRAMData_TOC *toc;
uint32_t fw_to_load;
toc = (struct SMU_DRAMData_TOC *)private->header;
toc->num_entries = 0;
toc->structure_version = 1;
if (!adev->firmware.smu_load)
return 0;
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for RLC\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for CE\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for PFP\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for ME\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
return -EINVAL;
}
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA1\n");
return -EINVAL;
}
iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
UCODE_ID_SDMA1_MASK |
UCODE_ID_CP_CE_MASK |
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK |
UCODE_ID_CP_MEC_JT1_MASK;
if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
return -EINVAL;
}
return 0;
}
static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
uint32_t fw_type)
{
uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
int i;
for (i = 0; i < adev->usec_timeout; i++) {
if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("check firmware loading failed\n");
return -EINVAL;
}
return 0;
}
int iceland_smu_start(struct amdgpu_device *adev)
{
int result;
result = iceland_smu_upload_firmware_image(adev);
if (result)
return result;
result = iceland_smu_start_smc(adev);
if (result)
return result;
return iceland_smu_request_load_fw(adev);
}
static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
.check_fw_load_finish = iceland_smu_check_fw_load_finish,
.request_smu_load_fw = NULL,
.request_smu_specific_fw = NULL,
};
int iceland_smu_init(struct amdgpu_device *adev)
{
struct iceland_smu_private_data *private;
uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
uint64_t mc_addr;
void *toc_buf_ptr;
int ret;
private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
if (NULL == private)
return -ENOMEM;
/* allocate firmware buffers */
if (adev->firmware.smu_load)
amdgpu_ucode_init_bo(adev);
adev->smu.priv = private;
adev->smu.fw_flags = 0;
/* Allocate FW image data structure and header buffer */
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
}
/* Retrieve GPU address for header buffer and internal buffer */
ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
if (ret) {
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to reserve the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to pin the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to map the TOC buffer\n");
return -EINVAL;
}
amdgpu_bo_unreserve(adev->smu.toc_buf);
private->header_addr_low = lower_32_bits(mc_addr);
private->header_addr_high = upper_32_bits(mc_addr);
private->header = toc_buf_ptr;
adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
return 0;
}
int iceland_smu_fini(struct amdgpu_device *adev)
{
amdgpu_bo_unref(&adev->smu.toc_buf);
kfree(adev->smu.priv);
adev->smu.priv = NULL;
if (adev->firmware.fw_buf)
amdgpu_ucode_fini_bo(adev);
return 0;
}

View File

@ -952,12 +952,6 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
static u32 si_get_virtual_caps(struct amdgpu_device *adev)
{
/* SI does not support SR-IOV */
return 0;
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS, false},
{GB_ADDR_CONFIG, false},
@ -1124,16 +1118,22 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
return 0;
}
static void si_detect_hw_virtualization(struct amdgpu_device *adev)
{
if (is_virtual_machine()) /* passthrough mode */
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
.detect_hw_virtualization = si_detect_hw_virtualization,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
.get_virtual_caps = &si_get_virtual_caps,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)

View File

@ -1,186 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "tonga_smum.h"
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
static int tonga_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
tonga_dpm_set_funcs(adev);
return 0;
}
static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30] = "amdgpu/tonga_smc.bin";
int err;
err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->pm.fw);
out:
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
}
return err;
}
static int tonga_dpm_sw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ret = tonga_dpm_init_microcode(adev);
if (ret)
return ret;
return 0;
}
static int tonga_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->pm.fw);
adev->pm.fw = NULL;
return 0;
}
static int tonga_dpm_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
/* smu init only needs to be called at startup, not resume.
* It should be in sw_init, but requires the fw info gathered
* in sw_init from other IP modules.
*/
ret = tonga_smu_init(adev);
if (ret) {
DRM_ERROR("SMU initialization failed\n");
goto fail;
}
ret = tonga_smu_start(adev);
if (ret) {
DRM_ERROR("SMU start failed\n");
goto fail;
}
mutex_unlock(&adev->pm.mutex);
return 0;
fail:
adev->firmware.smu_load = false;
mutex_unlock(&adev->pm.mutex);
return -EINVAL;
}
static int tonga_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->pm.mutex);
/* smu fini only needs to be called at teardown, not suspend.
* It should be in sw_fini, but we put it here for symmetry
* with smu init.
*/
tonga_smu_fini(adev);
mutex_unlock(&adev->pm.mutex);
return 0;
}
static int tonga_dpm_suspend(void *handle)
{
return tonga_dpm_hw_fini(handle);
}
static int tonga_dpm_resume(void *handle)
{
return tonga_dpm_hw_init(handle);
}
static int tonga_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int tonga_dpm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
const struct amd_ip_funcs tonga_dpm_ip_funcs = {
.name = "tonga_dpm",
.early_init = tonga_dpm_early_init,
.late_init = NULL,
.sw_init = tonga_dpm_sw_init,
.sw_fini = tonga_dpm_sw_fini,
.hw_init = tonga_dpm_hw_init,
.hw_fini = tonga_dpm_hw_fini,
.suspend = tonga_dpm_suspend,
.resume = tonga_dpm_resume,
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.set_clockgating_state = tonga_dpm_set_clockgating_state,
.set_powergating_state = tonga_dpm_set_powergating_state,
};
static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
.get_temperature = NULL,
.pre_set_power_state = NULL,
.set_power_state = NULL,
.post_set_power_state = NULL,
.display_configuration_changed = NULL,
.get_sclk = NULL,
.get_mclk = NULL,
.print_power_state = NULL,
.debugfs_print_current_performance_level = NULL,
.force_performance_level = NULL,
.vblank_too_short = NULL,
.powergate_uvd = NULL,
};
static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
{
if (NULL == adev->pm.funcs)
adev->pm.funcs = &tonga_dpm_funcs;
}

View File

@ -1,862 +0,0 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "tonga_ppsmc.h"
#include "tonga_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#define TONGA_SMC_SIZE 0x20000
static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
{
uint32_t val;
if (smc_address & 3)
return -EINVAL;
if ((smc_address + 3) > limit)
return -EINVAL;
WREG32(mmSMC_IND_INDEX_0, smc_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
return 0;
}
static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
uint32_t addr;
uint32_t data, orig_data;
int result = 0;
uint32_t extra_shift;
unsigned long flags;
if (smc_start_address & 3)
return -EINVAL;
if ((smc_start_address + byte_count) > limit)
return -EINVAL;
addr = smc_start_address;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* Bytes are written into the SMC address space with the MSB first */
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
result = tonga_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
addr += 4;
}
if (0 != byte_count) {
/* Now write odd bytes left, do a read modify write cycle */
data = 0;
result = tonga_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
orig_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
data = (data << 8) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (orig_data & ~((~0UL) << extra_shift));
result = tonga_set_smc_sram_address(adev, addr, limit);
if (result)
goto out;
WREG32(mmSMC_IND_DATA_0, data);
}
out:
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
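/*
 * Illustrative, self-contained sketch (not part of the driver): the MSB-first
 * packing and the read-modify-write merge of a partial trailing word that
 * tonga_copy_bytes_to_smc() above performs, shown without the register I/O.
 * Function names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
static uint32_t pack_msb_first(const uint8_t *src)
{
	/* src[0] lands in bits 31..24, matching the 4-byte loop above */
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8) | src[3];
}
static uint32_t merge_tail(uint32_t orig, const uint8_t *src, uint32_t count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - count);	/* count is 1..3 here */
	while (count--)
		data = (data << 8) + *src++;
	/* new bytes take the high end, the untouched low bytes are preserved */
	return (data << extra_shift) | (orig & ~(~0u << extra_shift));
}
int main(void)
{
	uint8_t bytes[7] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
	printf("%08x\n", pack_msb_first(bytes));                /* 11223344 */
	printf("%08x\n", merge_tail(0xaabbccdd, bytes + 4, 3)); /* 556677dd */
	return 0;
}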
static int tonga_program_jump_on_start(struct amdgpu_device *adev)
{
static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
return 0;
}
static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}
static int wait_smu_response(struct amdgpu_device *adev)
{
int i;
uint32_t val;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(mmSMC_RESP_0);
if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, 0x20000);
WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send message\n");
return -EINVAL;
}
return 0;
}
static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!tonga_is_smc_ram_running(adev))
{
return -EINVAL;
}
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send message\n");
return -EINVAL;
}
return 0;
}
static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MESSAGE_0, msg);
return 0;
}
static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg,
uint32_t parameter)
{
if (!tonga_is_smc_ram_running(adev))
return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, parameter);
return tonga_send_msg_to_smc(adev, msg);
}
static int tonga_send_msg_to_smc_with_parameter_without_waiting(
struct amdgpu_device *adev,
PPSMC_Msg msg, uint32_t parameter)
{
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
return -EINVAL;
}
WREG32(mmSMC_MSG_ARG_0, parameter);
return tonga_send_msg_to_smc_without_waiting(adev, msg);
}
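/*
 * Usage sketch (illustrative only): the mailbox protocol implemented by the
 * helpers above is "wait for the previous response, write the argument to
 * SMC_MSG_ARG_0, write the message id to SMC_MESSAGE_0, poll SMC_RESP_0".
 * Programming a 64-bit DRAM address therefore splits into two messages, as
 * tonga_smu_request_load_fw() does later in this file; this wrapper name is
 * hypothetical and not part of the driver.
 */
static int __maybe_unused tonga_smu_set_dram_addr_sketch(struct amdgpu_device *adev,
							 uint64_t addr)
{
	int ret;
	ret = tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI,
						   upper_32_bits(addr));
	if (ret)
		return ret;
	return tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO,
						    lower_32_bits(addr));
}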
#if 0 /* not used yet */
static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
{
int i;
uint32_t val;
if (!tonga_is_smc_ram_running(adev))
return -EINVAL;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
break;
udelay(1);
}
if (i == adev->usec_timeout)
return -EINVAL;
return 0;
}
#endif
static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
{
const struct smc_firmware_header_v1_0 *hdr;
uint32_t ucode_size;
uint32_t ucode_start_address;
const uint8_t *src;
uint32_t val;
uint32_t byte_count;
uint32_t *data;
unsigned long flags;
if (!adev->pm.fw)
return -EINVAL;
/* Skip SMC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
*/
if (adev->virtualization.supports_sr_iov)
return 0;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
src = (const uint8_t *)
(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
if (ucode_size & 3) {
DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
return -EINVAL;
}
if (ucode_size > TONGA_SMC_SIZE) {
DRM_ERROR("SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
byte_count = ucode_size;
data = (uint32_t *)src;
for (; byte_count >= 4; data++, byte_count -= 4)
WREG32(mmSMC_IND_DATA_0, data[0]);
val = RREG32(mmSMC_IND_ACCESS_CNTL);
val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
WREG32(mmSMC_IND_ACCESS_CNTL, val);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
}
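/*
 * Illustrative sketch (not part of the driver): reading a block back through
 * the same SMC_IND_INDEX_0/SMC_IND_DATA_0 pair with auto-increment enabled,
 * mirroring the upload loop above.  Locking follows
 * tonga_smu_upload_firmware_image(); "buf" and "dwords" are hypothetical.
 */
static void __maybe_unused tonga_smu_read_back_sketch(struct amdgpu_device *adev,
						      uint32_t start, uint32_t *buf,
						      uint32_t dwords)
{
	unsigned long flags;
	uint32_t val, i;
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, start);
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	for (i = 0; i < dwords; i++)
		buf[i] = RREG32(mmSMC_IND_DATA_0);
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}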
#if 0 /* not used yet */
static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t *value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = tonga_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
*value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
uint32_t smc_address,
uint32_t value,
uint32_t limit)
{
int result;
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
result = tonga_set_smc_sram_address(adev, smc_address, limit);
if (result == 0)
WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return result;
}
static int tonga_smu_stop_smc(struct amdgpu_device *adev)
{
uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
return 0;
}
#endif
static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case UCODE_ID_SDMA0:
return AMDGPU_UCODE_ID_SDMA0;
case UCODE_ID_SDMA1:
return AMDGPU_UCODE_ID_SDMA1;
case UCODE_ID_CP_CE:
return AMDGPU_UCODE_ID_CP_CE;
case UCODE_ID_CP_PFP:
return AMDGPU_UCODE_ID_CP_PFP;
case UCODE_ID_CP_ME:
return AMDGPU_UCODE_ID_CP_ME;
case UCODE_ID_CP_MEC:
case UCODE_ID_CP_MEC_JT1:
return AMDGPU_UCODE_ID_CP_MEC1;
case UCODE_ID_CP_MEC_JT2:
return AMDGPU_UCODE_ID_CP_MEC2;
case UCODE_ID_RLC_G:
return AMDGPU_UCODE_ID_RLC_G;
default:
DRM_ERROR("ucode type is out of range!\n");
return AMDGPU_UCODE_ID_MAXIMUM;
}
}
static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
uint32_t fw_type,
struct SMU_Entry *entry)
{
enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
const struct gfx_firmware_header_v1_0 *header = NULL;
uint64_t gpu_addr;
uint32_t data_size;
if (ucode->fw == NULL)
return -EINVAL;
gpu_addr = ucode->mc_addr;
header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
data_size = le32_to_cpu(header->header.ucode_size_bytes);
if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
(fw_type == UCODE_ID_CP_MEC_JT2)) {
gpu_addr += le32_to_cpu(header->jt_offset) << 2;
data_size = le32_to_cpu(header->jt_size) << 2;
}
entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
entry->id = (uint16_t)fw_type;
entry->image_addr_high = upper_32_bits(gpu_addr);
entry->image_addr_low = lower_32_bits(gpu_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = data_size;
entry->num_register_entries = 0;
if (fw_type == UCODE_ID_RLC_G)
entry->flags = 1;
else
entry->flags = 0;
return 0;
}
static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
{
struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
struct SMU_DRAMData_TOC *toc;
uint32_t fw_to_load;
WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
toc = (struct SMU_DRAMData_TOC *)private->header;
toc->num_entries = 0;
toc->structure_version = 1;
if (!adev->firmware.smu_load)
return 0;
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for RLC\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for CE\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for PFP\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for ME\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
return -EINVAL;
}
if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA1\n");
return -EINVAL;
}
tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
UCODE_ID_SDMA1_MASK |
UCODE_ID_CP_CE_MASK |
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK;
if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
return -EINVAL;
}
return 0;
}
static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
{
switch (fw_type) {
case AMDGPU_UCODE_ID_SDMA0:
return UCODE_ID_SDMA0_MASK;
case AMDGPU_UCODE_ID_SDMA1:
return UCODE_ID_SDMA1_MASK;
case AMDGPU_UCODE_ID_CP_CE:
return UCODE_ID_CP_CE_MASK;
case AMDGPU_UCODE_ID_CP_PFP:
return UCODE_ID_CP_PFP_MASK;
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
return UCODE_ID_RLC_G_MASK;
default:
DRM_ERROR("ucode type is out of range!\n");
return 0;
}
}
static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
uint32_t fw_type)
{
uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
int i;
for (i = 0; i < adev->usec_timeout; i++) {
if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("check firmware loading failed\n");
return -EINVAL;
}
return 0;
}
static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
{
int result;
uint32_t val;
int i;
/* Assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
result = tonga_smu_upload_firmware_image(adev);
if (result)
return result;
/* Clear status */
WREG32_SMC(ixSMU_STATUS, 0);
/* Enable clock */
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
/* De-assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
/* Set SMU Auto Start */
val = RREG32_SMC(ixSMU_INPUT_DATA);
val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
WREG32_SMC(ixSMU_INPUT_DATA, val);
/* Clear firmware interrupt enable flag */
WREG32_SMC(ixFIRMWARE_FLAGS, 0);
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixRCU_UC_EVENTS);
if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Interrupt is not enabled by firmware\n");
return -EINVAL;
}
/* Call Test SMU message with 0x20000 offset
* to trigger SMU start
*/
tonga_send_msg_to_smc_offset(adev);
/* Wait for done bit to be set */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixSMU_STATUS);
if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Timeout for SMU start\n");
return -EINVAL;
}
/* Check pass/failed indicator */
val = RREG32_SMC(ixSMU_STATUS);
if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
DRM_ERROR("SMU Firmware start failed\n");
return -EINVAL;
}
/* Wait for firmware to initialize */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixFIRMWARE_FLAGS);
if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("SMU firmware initialization failed\n");
return -EINVAL;
}
return 0;
}
static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
{
int i, result;
uint32_t val;
/* wait for smc boot up */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixRCU_UC_EVENTS);
val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
if (val)
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("SMC boot sequence is not completed\n");
return -EINVAL;
}
/* Clear firmware interrupt enable flag */
WREG32_SMC(ixFIRMWARE_FLAGS, 0);
/* Assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
result = tonga_smu_upload_firmware_image(adev);
if (result)
return result;
/* Set smc instruction start point at 0x0 */
tonga_program_jump_on_start(adev);
/* Enable clock */
val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
/* De-assert reset */
val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
/* Wait for firmware to initialize */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32_SMC(ixFIRMWARE_FLAGS);
if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
break;
udelay(1);
}
if (i == adev->usec_timeout) {
DRM_ERROR("Timeout for SMC firmware initialization\n");
return -EINVAL;
}
return 0;
}
int tonga_smu_start(struct amdgpu_device *adev)
{
int result;
uint32_t val;
if (!tonga_is_smc_ram_running(adev)) {
val = RREG32_SMC(ixSMU_FIRMWARE);
if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
result = tonga_smu_start_in_non_protection_mode(adev);
if (result)
return result;
} else {
result = tonga_smu_start_in_protection_mode(adev);
if (result)
return result;
}
}
return tonga_smu_request_load_fw(adev);
}
static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
.check_fw_load_finish = tonga_smu_check_fw_load_finish,
.request_smu_load_fw = NULL,
.request_smu_specific_fw = NULL,
};
int tonga_smu_init(struct amdgpu_device *adev)
{
struct tonga_smu_private_data *private;
uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
uint32_t smu_internal_buffer_size = 200*4096;
struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
uint64_t mc_addr;
void *toc_buf_ptr;
void *smu_buf_ptr;
int ret;
private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
if (NULL == private)
return -ENOMEM;
/* allocate firmware buffers */
if (adev->firmware.smu_load)
amdgpu_ucode_init_bo(adev);
adev->smu.priv = private;
adev->smu.fw_flags = 0;
/* Allocate FW image data structure and header buffer */
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
}
/* Allocate buffer for SMU internal buffer */
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
}
/* Retrieve GPU address for header buffer and internal buffer */
ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
if (ret) {
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to reserve the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to pin the TOC buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to map the TOC buffer\n");
return -EINVAL;
}
amdgpu_bo_unreserve(adev->smu.toc_buf);
private->header_addr_low = lower_32_bits(mc_addr);
private->header_addr_high = upper_32_bits(mc_addr);
private->header = toc_buf_ptr;
ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
if (ret) {
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to reserve the SMU internal buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to pin the SMU internal buffer\n");
return -EINVAL;
}
ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
if (ret) {
amdgpu_bo_unreserve(adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
amdgpu_bo_unref(&adev->smu.toc_buf);
DRM_ERROR("Failed to map the SMU internal buffer\n");
return -EINVAL;
}
amdgpu_bo_unreserve(adev->smu.smu_buf);
private->smu_buffer_addr_low = lower_32_bits(mc_addr);
private->smu_buffer_addr_high = upper_32_bits(mc_addr);
adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
return 0;
}
int tonga_smu_fini(struct amdgpu_device *adev)
{
amdgpu_bo_unref(&adev->smu.toc_buf);
amdgpu_bo_unref(&adev->smu.smu_buf);
kfree(adev->smu.priv);
adev->smu.priv = NULL;
if (adev->firmware.fw_buf)
amdgpu_ucode_fini_bo(adev);
return 0;
}
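/*
 * Call-order note (a sketch of how this file was used, per the tonga_dpm
 * code shown earlier in this diff): tonga_smu_init() and tonga_smu_start()
 * were invoked from the DPM hw_init hook under adev->pm.mutex, and
 * tonga_smu_fini() from hw_fini, roughly:
 *
 *	mutex_lock(&adev->pm.mutex);
 *	ret = tonga_smu_init(adev);
 *	if (!ret)
 *		ret = tonga_smu_start(adev);
 *	mutex_unlock(&adev->pm.mutex);
 *	...
 *	tonga_smu_fini(adev);
 */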

View File

@ -79,6 +79,9 @@
#endif #endif
#include "dce_virtual.h" #include "dce_virtual.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@ -445,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true; return true;
} }
static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
{
u32 caps = 0;
u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
caps |= AMDGPU_VIRT_CAPS_IS_VF;
return caps;
}
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
/* bit0: 0 means pf and 1 means vf */
/* bit31: 0 means disable IOV and 1 means enable */
if (reg & 1)
adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
if (reg & 0x80000000)
adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (reg == 0) {
if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}
}
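/*
 * Self-contained sketch (not driver code) of the decode performed by
 * vi_detect_hw_virtualization() above: bit0 distinguishes PF/VF, bit31
 * reports whether IOV is enabled, and a register reading of 0 while running
 * in a VM suggests passthrough.  The enum and function names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
enum virt_mode { VIRT_NONE, VIRT_SRIOV_PF, VIRT_SRIOV_VF, VIRT_PASSTHROUGH };
static enum virt_mode classify_bif_iov(uint32_t reg, int running_in_vm)
{
	if (reg & 0x80000000)			/* IOV enabled */
		return (reg & 1) ? VIRT_SRIOV_VF : VIRT_SRIOV_PF;
	if (reg == 0 && running_in_vm)		/* no IOV, but inside a VM */
		return VIRT_PASSTHROUGH;
	return VIRT_NONE;
}
int main(void)
{
	printf("%d %d %d\n",
	       classify_bif_iov(0x80000001, 0),	/* VF */
	       classify_bif_iov(0x80000000, 0),	/* PF */
	       classify_bif_iov(0, 1));		/* passthrough */
	return 0;
}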
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@ -1521,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{ {
.read_disabled_bios = &vi_read_disabled_bios, .read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom, .read_bios_from_rom = &vi_read_bios_from_rom,
.detect_hw_virtualization = vi_detect_hw_virtualization,
.read_register = &vi_read_register, .read_register = &vi_read_register,
.reset = &vi_asic_reset, .reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state, .set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk, .get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks, .set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks, .set_vce_clocks = &vi_set_vce_clocks,
.get_virtual_caps = &vi_get_virtual_caps,
}; };
static int vi_common_early_init(void *handle) static int vi_common_early_init(void *handle)
@ -1657,6 +1663,10 @@ static int vi_common_early_init(void *handle)
return -EINVAL; return -EINVAL;
} }
/* in early init stage, vbios code won't work */
if (adev->asic_funcs->detect_hw_virtualization)
amdgpu_asic_detect_hw_virtualization(adev);
if (amdgpu_smc_load_fw && smc_enabled) if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true; adev->firmware.smu_load = true;
@ -1800,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
} }
static int vi_common_set_clockgating_state_by_smu(void *handle,
enum amd_clockgating_state state)
{
uint32_t msg_id, pp_state;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *pp_handle = adev->powerplay.pp_handle;
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_CG | PP_STATE_LS;
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_MC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_SDMA,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_HDP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
return 0;
}
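/*
 * Illustrative sketch only: PP_CG_MSG_ID() packs a clock-gating request
 * (group, block, which supports to touch, and the requested CG/LS state)
 * into a single 32-bit message for the SMU.  The field positions below are
 * hypothetical, chosen just to show the packing idea; the real layout comes
 * from the powerplay headers, not from this sketch.
 */
#define EX_GROUP_SHIFT   24	/* hypothetical */
#define EX_BLOCK_SHIFT   16	/* hypothetical */
#define EX_SUPPORT_SHIFT  8	/* hypothetical */
#define EX_STATE_SHIFT    0	/* hypothetical */
static inline uint32_t ex_cg_msg_id(uint32_t group, uint32_t block,
				    uint32_t support, uint32_t state)
{
	return (group << EX_GROUP_SHIFT) | (block << EX_BLOCK_SHIFT) |
	       (support << EX_SUPPORT_SHIFT) | (state << EX_STATE_SHIFT);
}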
static int vi_common_set_clockgating_state(void *handle, static int vi_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state) enum amd_clockgating_state state)
{ {
@ -1825,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
vi_update_hdp_light_sleep(adev, vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false); state == AMD_CG_STATE_GATE ? true : false);
break; break;
case CHIP_TONGA:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
vi_common_set_clockgating_state_by_smu(adev, state);
default: default:
break; break;
} }

View File

@ -373,4 +373,41 @@
#define VCE_CMD_WAIT_GE 0x00000106 #define VCE_CMD_WAIT_GE 0x00000106
#define VCE_CMD_UPDATE_PTB 0x00000107 #define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108 #define VCE_CMD_FLUSH_TLB 0x00000108
/* mmPA_SC_RASTER_CONFIG mask */
#define RB_MAP_PKR0(x) ((x) << 0)
#define RB_MAP_PKR0_MASK (0x3 << 0)
#define RB_MAP_PKR1(x) ((x) << 2)
#define RB_MAP_PKR1_MASK (0x3 << 2)
#define RB_XSEL2(x) ((x) << 4)
#define RB_XSEL2_MASK (0x3 << 4)
#define RB_XSEL (1 << 6)
#define RB_YSEL (1 << 7)
#define PKR_MAP(x) ((x) << 8)
#define PKR_MAP_MASK (0x3 << 8)
#define PKR_XSEL(x) ((x) << 10)
#define PKR_XSEL_MASK (0x3 << 10)
#define PKR_YSEL(x) ((x) << 12)
#define PKR_YSEL_MASK (0x3 << 12)
#define SC_MAP(x) ((x) << 16)
#define SC_MAP_MASK (0x3 << 16)
#define SC_XSEL(x) ((x) << 18)
#define SC_XSEL_MASK (0x3 << 18)
#define SC_YSEL(x) ((x) << 20)
#define SC_YSEL_MASK (0x3 << 20)
#define SE_MAP(x) ((x) << 24)
#define SE_MAP_MASK (0x3 << 24)
#define SE_XSEL(x) ((x) << 26)
#define SE_XSEL_MASK (0x3 << 26)
#define SE_YSEL(x) ((x) << 28)
#define SE_YSEL_MASK (0x3 << 28)
/* mmPA_SC_RASTER_CONFIG_1 mask */
#define SE_PAIR_MAP(x) ((x) << 0)
#define SE_PAIR_MAP_MASK (0x3 << 0)
#define SE_PAIR_XSEL(x) ((x) << 2)
#define SE_PAIR_XSEL_MASK (0x3 << 2)
#define SE_PAIR_YSEL(x) ((x) << 4)
#define SE_PAIR_YSEL_MASK (0x3 << 4)
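/*
 * Usage sketch (illustrative): the helpers above build a PA_SC_RASTER_CONFIG
 * value field by field, and each *_MASK covers the field its macro shifts
 * into place.  The particular field values chosen here are arbitrary.
 */
static inline unsigned int example_raster_config(void)
{
	return RB_MAP_PKR0(2) | PKR_MAP(1) | SC_MAP(1) |
	       SE_MAP(2) | SE_XSEL(1) | SE_YSEL(1);
}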
#endif #endif

View File

@ -1398,10 +1398,45 @@
#define DB_DEPTH_INFO 0xA00F #define DB_DEPTH_INFO 0xA00F
#define PA_SC_RASTER_CONFIG 0xA0D4 #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_MAP_PKR0(x) ((x) << 0)
# define RB_MAP_PKR0_MASK (0x3 << 0)
# define RB_MAP_PKR1(x) ((x) << 2)
# define RB_MAP_PKR1_MASK (0x3 << 2)
# define RASTER_CONFIG_RB_MAP_0 0 # define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1 # define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2 # define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3 # define RASTER_CONFIG_RB_MAP_3 3
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
# define RB_XSEL (1 << 6)
# define RB_YSEL (1 << 7)
# define PKR_MAP(x) ((x) << 8)
# define PKR_MAP_MASK (0x3 << 8)
# define RASTER_CONFIG_PKR_MAP_0 0
# define RASTER_CONFIG_PKR_MAP_1 1
# define RASTER_CONFIG_PKR_MAP_2 2
# define RASTER_CONFIG_PKR_MAP_3 3
# define PKR_XSEL(x) ((x) << 10)
# define PKR_XSEL_MASK (0x3 << 10)
# define PKR_YSEL(x) ((x) << 12)
# define PKR_YSEL_MASK (0x3 << 12)
# define SC_MAP(x) ((x) << 16)
# define SC_MAP_MASK (0x3 << 16)
# define SC_XSEL(x) ((x) << 18)
# define SC_XSEL_MASK (0x3 << 18)
# define SC_YSEL(x) ((x) << 20)
# define SC_YSEL_MASK (0x3 << 20)
# define SE_MAP(x) ((x) << 24)
# define SE_MAP_MASK (0x3 << 24)
# define RASTER_CONFIG_SE_MAP_0 0
# define RASTER_CONFIG_SE_MAP_1 1
# define RASTER_CONFIG_SE_MAP_2 2
# define RASTER_CONFIG_SE_MAP_3 3
# define SE_XSEL(x) ((x) << 26)
# define SE_XSEL_MASK (0x3 << 26)
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
#define VGT_EVENT_INITIATOR 0xA2A4 #define VGT_EVENT_INITIATOR 0xA2A4
# define SAMPLE_STREAMOUTSTATS1 (1 << 0) # define SAMPLE_STREAMOUTSTATS1 (1 << 0)

drivers/gpu/drm/amd/include/cgs_common.h Normal file → Executable file
View File

@ -161,6 +161,7 @@ struct cgs_clock_limits {
*/ */
struct cgs_firmware_info { struct cgs_firmware_info {
uint16_t version; uint16_t version;
uint16_t fw_version;
uint16_t feature_version; uint16_t feature_version;
uint32_t image_size; uint32_t image_size;
uint64_t mc_addr; uint64_t mc_addr;

View File

@ -191,11 +191,9 @@ static int pp_sw_reset(void *handle)
} }
static int pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{ {
struct pp_hwmgr *hwmgr; struct pp_hwmgr *hwmgr;
uint32_t msg_id, pp_state;
if (handle == NULL) if (handle == NULL)
return -EINVAL; return -EINVAL;
@ -209,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
return 0; return 0;
} }
return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_CG | PP_STATE_LS;
/* Enable/disable GFX blocks clock gating through SMU */
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_3D,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_RLC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
/* Enable/disable System blocks clock gating through SMU */
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_MC,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_HDP,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
PP_BLOCK_SYS_SDMA,
PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
pp_state);
hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
return 0;
} }
static int pp_set_powergating_state(void *handle, static int pp_set_powergating_state(void *handle,
@ -362,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle, .is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle, .wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset, .soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_clockgating_state = NULL,
.set_powergating_state = pp_set_powergating_state, .set_powergating_state = pp_set_powergating_state,
}; };
@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
} }
} }
static void
pp_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
struct pp_hwmgr *hwmgr;
if (handle == NULL)
return;
hwmgr = ((struct pp_instance *)handle)->hwmgr;
if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
return;
if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
}
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{ {
struct pp_hwmgr *hwmgr; struct pp_hwmgr *hwmgr;
@ -894,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
} }
static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
{
struct pp_hwmgr *hwmgr;
if (!handle)
return -EINVAL;
hwmgr = ((struct pp_instance *)handle)->hwmgr;
PP_CHECK_HW(hwmgr);
if (hwmgr->hwmgr_func->read_sensor == NULL) {
printk(KERN_INFO "%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
}
const struct amd_powerplay_funcs pp_dpm_funcs = { const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature, .get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw, .load_firmware = pp_dpm_load_fw,
@ -906,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.powergate_vce = pp_dpm_powergate_vce, .powergate_vce = pp_dpm_powergate_vce,
.powergate_uvd = pp_dpm_powergate_uvd, .powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks, .dispatch_tasks = pp_dpm_dispatch_tasks,
.print_current_performance_level = pp_debugfs_print_current_performance_level,
.set_fan_control_mode = pp_dpm_set_fan_control_mode, .set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode, .get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent, .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@ -920,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_sclk_od = pp_dpm_set_sclk_od, .set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od, .get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od, .set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
}; };
static int amd_pp_instance_init(struct amd_pp_init *pp_init, static int amd_pp_instance_init(struct amd_pp_init *pp_init,

View File

@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
unblock_adjust_power_state_tasks, unblock_adjust_power_state_tasks,
set_cpu_power_state, set_cpu_power_state,
notify_hw_power_source_tasks, notify_hw_power_source_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
/* updateDALConfigurationTasks, /* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */ variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks, adjust_power_state_tasks,

View File

@ -101,11 +101,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (requested == NULL) if (requested == NULL)
return 0; return 0;
phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal))) if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
equal = false; equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) { if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware); phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size); memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
} }

View File

@ -3,16 +3,12 @@
# It provides the hardware management services for the driver. # It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o tonga_powertune.o\
process_pptables_v1_0.o ppatomctrl.o \
tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
fiji_clockpowergating.o fiji_thermal.o \
polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
polaris10_clockpowergating.o iceland_hwmgr.o \
iceland_clockpowergating.o iceland_thermal.o \
iceland_powertune.o
cz_clockpowergating.o pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))

View File

@ -1538,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct cz_power_state); return sizeof(struct cz_power_state);
} }
static void
cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
struct phm_vce_clock_voltage_dependency_table *vce_table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
struct phm_uvd_clock_voltage_dependency_table *uvd_table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
uint16_t vddnb, vddgfx;
int result;
if (sclk_index >= NUM_SCLK_LEVELS) {
seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
} else {
sclk = table->entries[sclk_index].clk;
seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
}
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
if (!cz_hwmgr->uvd_power_gated) {
if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
} else {
vclk = uvd_table->entries[uvd_index].vclk;
dclk = uvd_table->entries[uvd_index].dclk;
seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
}
}
seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
if (!cz_hwmgr->vce_power_gated) {
if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
} else {
ecclk = vce_table->entries[vce_index].ecclk;
seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
}
}
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
if (0 == result) {
activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
} else {
activity_percent = 50;
}
seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
}
static void cz_hw_print_display_cfg( static void cz_hw_print_display_cfg(
const struct cc6_settings *cc6_settings) const struct cc6_settings *cc6_settings)
{ {
@ -1857,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0; return 0;
} }
static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
struct phm_vce_clock_voltage_dependency_table *vce_table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
struct phm_uvd_clock_voltage_dependency_table *uvd_table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
uint16_t vddnb, vddgfx;
int result;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
if (sclk_index < NUM_SCLK_LEVELS) {
sclk = table->entries[sclk_index].clk;
*value = sclk;
return 0;
}
return -EINVAL;
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
*value = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
*value = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
if (!cz_hwmgr->uvd_power_gated) {
if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
vclk = uvd_table->entries[uvd_index].vclk;
*value = vclk;
return 0;
}
}
*value = 0;
return 0;
case AMDGPU_PP_SENSOR_UVD_DCLK:
if (!cz_hwmgr->uvd_power_gated) {
if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
dclk = uvd_table->entries[uvd_index].dclk;
*value = dclk;
return 0;
}
}
*value = 0;
return 0;
case AMDGPU_PP_SENSOR_VCE_ECCLK:
if (!cz_hwmgr->vce_power_gated) {
if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
ecclk = vce_table->entries[vce_index].ecclk;
*value = ecclk;
return 0;
}
}
*value = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
if (0 == result) {
activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
} else {
activity_percent = 50;
}
*value = activity_percent;
return 0;
case AMDGPU_PP_SENSOR_UVD_POWER:
*value = cz_hwmgr->uvd_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_VCE_POWER:
*value = cz_hwmgr->vce_power_gated ? 0 : 1;
return 0;
default:
return -EINVAL;
}
}
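/*
 * Usage sketch (illustrative only): callers reach this handler through the
 * amd_powerplay dispatch added in this series (pp_dpm_read_sensor() ->
 * hwmgr->hwmgr_func->read_sensor), so querying the current GPU load looks
 * roughly like:
 *
 *	int32_t load = 0;
 *
 *	if (!pp_dpm_read_sensor(pp_handle, AMDGPU_PP_SENSOR_GPU_LOAD, &load))
 *		DRM_INFO("GPU load: %d %%\n", load);
 *
 * pp_handle here stands for the driver's struct pp_instance pointer.
 */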
static const struct pp_hwmgr_func cz_hwmgr_funcs = { static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init, .backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini, .backend_fini = cz_hwmgr_backend_fini,
@ -1872,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.patch_boot_state = cz_dpm_patch_boot_state, .patch_boot_state = cz_dpm_patch_boot_state,
.get_pp_table_entry = cz_dpm_get_pp_table_entry, .get_pp_table_entry = cz_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries, .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
.print_current_perforce_level = cz_print_current_perforce_level,
.set_cpu_power_state = cz_set_cpu_power_state, .set_cpu_power_state = cz_set_cpu_power_state,
.store_cc6_data = cz_store_cc6_data, .store_cc6_data = cz_store_cc6_data,
.force_clock_level = cz_force_clock_level, .force_clock_level = cz_force_clock_level,
@ -1882,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks, .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type, .get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks, .get_max_high_clocks = cz_get_max_high_clocks,
.read_sensor = cz_read_sensor,
}; };
int cz_hwmgr_init(struct pp_hwmgr *hwmgr) int cz_hwmgr_init(struct pp_hwmgr *hwmgr)

View File

@ -1,121 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "fiji_clockpowergating.h"
#include "fiji_ppsmc.h"
#include "fiji_hwmgr.h"
int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
data->samu_power_gated = false;
data->acp_power_gated = false;
return 0;
}
int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (data->uvd_power_gated == bgate)
return 0;
data->uvd_power_gated = bgate;
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
fiji_update_uvd_dpm(hwmgr, true);
} else {
fiji_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
}
return 0;
}
int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_set_power_state_input states;
const struct pp_power_state *pcurrent;
struct pp_power_state *requested;
if (data->vce_power_gated == bgate)
return 0;
data->vce_power_gated = bgate;
pcurrent = hwmgr->current_ps;
requested = hwmgr->request_ps;
states.pcurrent_state = &(pcurrent->hardware);
states.pnew_state = &(requested->hardware);
fiji_update_vce_dpm(hwmgr, &states);
fiji_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
}
int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (data->samu_power_gated == bgate)
return 0;
data->samu_power_gated = bgate;
if (bgate)
fiji_update_samu_dpm(hwmgr, true);
else
fiji_update_samu_dpm(hwmgr, false);
return 0;
}
int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (data->acp_power_gated == bgate)
return 0;
data->acp_power_gated = bgate;
if (bgate)
fiji_update_acp_dpm(hwmgr, true);
else
fiji_update_acp_dpm(hwmgr, false);
return 0;
}

View File

@ -1,105 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef FIJI_DYN_DEFAULTS_H
#define FIJI_DYN_DEFAULTS_H
/** \file
* Volcanic Islands Dynamic default parameters.
*/
enum FIJIdpm_TrendDetection
{
FIJIAdpm_TrendDetection_AUTO,
FIJIAdpm_TrendDetection_UP,
FIJIAdpm_TrendDetection_DOWN
};
typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
/* Bit vector representing same fields as hardware register. */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
* HDP_busy
* IH_busy
* UVD_busy
* VCE_busy
* ACP_busy
* SAMU_busy
* SDMA enabled */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
* SH_Gfx_busy
* RB_Gfx_busy
* VCE_busy */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
* FE_Gfx_busy
* RB_Gfx_busy
* ACP_busy */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
* FE_Gfx_busy
* SH_Gfx_busy
* UVD_busy */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
* VCE_busy
* ACP_busy
* SAMU_busy */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
/* thermal protection counter (units). */
#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
/* static screen threshold unit */
#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
/* static screen threshold */
#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
/* gfx idle clock stop threshold */
#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
/* Fixed reference divider to use when building baby stepping tables. */
#define PPFIJI_REFERENCEDIVIDER_DFLT 4
/* ULV voltage change delay time
* Used to be delay_vreg in N.I. split for S.I.
* Using N.I. delay_vreg value as default
* ReferenceClock = 2700
* VoltageResponseTime = 1000
* VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
*/
#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
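/* Worked check of the formula above: 1000 * 2700 / 1600 = 1687.5, which
 * truncates to the 1687 used for PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT.
 */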
#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
#endif

File diff suppressed because it is too large

View File

@ -1,350 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _FIJI_HWMGR_H_
#define _FIJI_HWMGR_H_
#include "hwmgr.h"
#include "smu73.h"
#include "smu73_discrete.h"
#include "ppatomctrl.h"
#include "fiji_ppsmc.h"
#include "pp_endian.h"
#define FIJI_MAX_HARDWARE_POWERLEVELS 2
#define FIJI_AT_DFLT 30
#define FIJI_VOLTAGE_CONTROL_NONE 0x0
#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1
#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2
#define FIJI_VOLTAGE_CONTROL_MERGED 0x3
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
struct fiji_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
struct fiji_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
};
struct fiji_vce_clocks {
uint32_t evclk;
uint32_t ecclk;
};
struct fiji_power_state {
uint32_t magic;
struct fiji_uvd_clocks uvd_clks;
struct fiji_vce_clocks vce_clks;
uint32_t sam_clk;
uint32_t acp_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
};
struct fiji_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define FIJI_MINIMUM_ENGINE_CLOCK 2500
struct fiji_single_dpm_table {
uint32_t count;
struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
struct fiji_dpm_table {
struct fiji_single_dpm_table sclk_table;
struct fiji_single_dpm_table mclk_table;
struct fiji_single_dpm_table pcie_speed_table;
struct fiji_single_dpm_table vddc_table;
struct fiji_single_dpm_table vddci_table;
struct fiji_single_dpm_table mvdd_table;
};
struct fiji_clock_registers {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
uint32_t vCG_SPLL_FUNC_CNTL_4;
uint32_t vCG_SPLL_SPREAD_SPECTRUM;
uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
uint32_t vDLL_CNTL;
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_DQ_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
uint32_t vMPLL_SS1;
uint32_t vMPLL_SS2;
};
struct fiji_voltage_smio_registers {
uint32_t vS0_VID_LOWER_SMIO_CNTL;
};
#define FIJI_MAX_LEAKAGE_COUNT 8
struct fiji_leakage_voltage {
uint16_t count;
uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
};
struct fiji_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
struct fiji_bacos {
uint32_t best_match;
uint32_t baco_flags;
struct fiji_performance_level performance_level;
};
/* Ultra Low Voltage parameter structure */
struct fiji_ulv_parm {
bool ulv_supported;
uint32_t cg_ulv_parameter;
uint32_t ulv_volt_change_delay;
struct fiji_performance_level ulv_power_level;
};
struct fiji_display_timing {
uint32_t min_clock_in_sr;
uint32_t num_existing_displays;
};
struct fiji_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
uint32_t samu_dpm_enable_mask;
uint32_t sclk_dpm_enable_mask;
uint32_t mclk_dpm_enable_mask;
uint32_t pcie_dpm_enable_mask;
};
struct fiji_pcie_perf_range {
uint16_t max;
uint16_t min;
};
struct fiji_hwmgr {
struct fiji_dpm_table dpm_table;
struct fiji_dpm_table golden_dpm_table;
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
uint32_t voting_rights_clients3;
uint32_t voting_rights_clients4;
uint32_t voting_rights_clients5;
uint32_t voting_rights_clients6;
uint32_t voting_rights_clients7;
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
uint32_t vddc_vddci_delta;
uint32_t active_auto_throttle_sources;
struct fiji_clock_registers clock_registers;
struct fiji_voltage_smio_registers voltage_smio_registers;
bool is_memory_gddr5;
uint16_t acpi_vddc;
bool pspp_notify_required;
uint16_t force_pcie_gen;
uint16_t acpi_pcie_gen;
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
struct fiji_leakage_voltage vddc_leakage;
struct fiji_leakage_voltage Vddci_leakage;
uint32_t mvdd_control;
uint32_t vddc_mask_low;
uint32_t mvdd_mask_low;
uint16_t max_vddc_in_pptable;
uint16_t min_vddc_in_pptable;
uint16_t max_vddci_in_pptable;
uint16_t min_vddci_in_pptable;
uint32_t mclk_strobe_mode_threshold;
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edcwr_enable_threshold;
bool is_uvd_enabled;
struct fiji_vbios_boot_state vbios_boot_state;
bool battery_state;
bool is_tlu_enabled;
/* ---- SMC SRAM Address of firmware header tables ---- */
uint32_t sram_end;
uint32_t dpm_table_start;
uint32_t soft_regs_start;
uint32_t mc_reg_table_start;
uint32_t fan_table_start;
uint32_t arb_table_start;
struct SMU73_Discrete_DpmTable smc_state_table;
struct SMU73_Discrete_Ulv ulv_setting;
/* ---- Stuff originally coming from Evergreen ---- */
uint32_t vddci_control;
struct pp_atomctrl_voltage_table vddc_voltage_table;
struct pp_atomctrl_voltage_table vddci_voltage_table;
struct pp_atomctrl_voltage_table mvdd_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
uint32_t gpio_debug;
uint32_t mc_micro_code_feature;
uint32_t highest_mclk;
uint16_t acpi_vddci;
uint8_t mvdd_high_index;
uint8_t mvdd_low_index;
bool dll_default_on;
bool performance_request_registered;
/* ---- Low Power Features ---- */
struct fiji_bacos bacos;
struct fiji_ulv_parm ulv;
/* ---- CAC Stuff ---- */
uint32_t cac_table_start;
bool cac_configuration_required;
bool driver_calculate_cac_leakage;
bool cac_enabled;
/* ---- DPM2 Parameters ---- */
uint32_t power_containment_features;
bool enable_dte_feature;
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
const struct fiji_pt_defaults *power_tune_defaults;
struct SMU73_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
/* ---- Phase Shedding ---- */
bool vddc_phase_shed_control;
/* ---- DI/DT ---- */
struct fiji_display_timing display_timing;
/* ---- Thermal Temperature Setting ---- */
struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks;
struct fiji_pcie_perf_range pcie_gen_performance;
struct fiji_pcie_perf_range pcie_lane_performance;
struct fiji_pcie_perf_range pcie_gen_power_saving;
struct fiji_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold;
uint32_t last_mclk_dpm_enable_mask;
bool uvd_enabled;
/* ---- Power Gating States ---- */
bool uvd_power_gated;
bool vce_power_gated;
bool samu_power_gated;
bool acp_power_gated;
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
};
/* To convert to Q8.8 format for firmware */
#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256
enum Fiji_I2CLineID {
Fiji_I2CLineID_DDC1 = 0x90,
Fiji_I2CLineID_DDC2 = 0x91,
Fiji_I2CLineID_DDC3 = 0x92,
Fiji_I2CLineID_DDC4 = 0x93,
Fiji_I2CLineID_DDC5 = 0x94,
Fiji_I2CLineID_DDC6 = 0x95,
Fiji_I2CLineID_SCLSDA = 0x96,
Fiji_I2CLineID_DDCVGA = 0x97
};
#define Fiji_I2C_DDC1DATA 0
#define Fiji_I2C_DDC1CLK 1
#define Fiji_I2C_DDC2DATA 2
#define Fiji_I2C_DDC2CLK 3
#define Fiji_I2C_DDC3DATA 4
#define Fiji_I2C_DDC3CLK 5
#define Fiji_I2C_SDA 40
#define Fiji_I2C_SCL 41
#define Fiji_I2C_DDC4DATA 65
#define Fiji_I2C_DDC4CLK 66
#define Fiji_I2C_DDC5DATA 0x48
#define Fiji_I2C_DDC5CLK 0x49
#define Fiji_I2C_DDC6DATA 0x4a
#define Fiji_I2C_DDC6CLK 0x4b
#define Fiji_I2C_DDCVGADATA 0x4c
#define Fiji_I2C_DDCVGACLK 0x4d
#define FIJI_UNUSED_GPIO_PIN 0x7F
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _FIJI_HWMGR_H_ */

View File

@ -1,610 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "smumgr.h"
#include "fiji_hwmgr.h"
#include "fiji_powertune.h"
#include "fiji_smumgr.h"
#include "smu73_discrete.h"
#include "pp_debug.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
{1, 0xF, 0xFD,
/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
0x19, 5, 45}
};
void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint32_t tmp = 0;
if(table_info &&
table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
table_info->cac_dtp_table->usPowerTuneDataSetID)
fiji_hwmgr->power_tune_defaults =
&fiji_power_tune_data_set_array
[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
else
fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
/* Assume disabled */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
fiji_hwmgr->dte_tj_offset = tmp;
if (!tmp) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
fiji_hwmgr->fast_watermark_threshold = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
tmp = 1;
fiji_hwmgr->enable_dte_feature = tmp ? false : true;
fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
}
}
}
/* PPGen has the gain setting generated in x * 100 unit
* This function is to convert the unit to x * 4096(0x1000) unit.
* This is the unit expected by SMC firmware
*/
static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
{
uint32_t tmp;
tmp = raw_setting * 4096 / 100;
return (uint16_t)tmp;
}
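/* Worked example of the conversion above: a PPGen fan gain of 100
 * (i.e. 1.00 in the x * 100 encoding) becomes 100 * 4096 / 100 = 4096
 * (0x1000), which is 1.0 in the fixed-point unit the SMC firmware expects.
 */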
static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
{
switch (line) {
case Fiji_I2CLineID_DDC1 :
*scl = Fiji_I2C_DDC1CLK;
*sda = Fiji_I2C_DDC1DATA;
break;
case Fiji_I2CLineID_DDC2 :
*scl = Fiji_I2C_DDC2CLK;
*sda = Fiji_I2C_DDC2DATA;
break;
case Fiji_I2CLineID_DDC3 :
*scl = Fiji_I2C_DDC3CLK;
*sda = Fiji_I2C_DDC3DATA;
break;
case Fiji_I2CLineID_DDC4 :
*scl = Fiji_I2C_DDC4CLK;
*sda = Fiji_I2C_DDC4DATA;
break;
case Fiji_I2CLineID_DDC5 :
*scl = Fiji_I2C_DDC5CLK;
*sda = Fiji_I2C_DDC5DATA;
break;
case Fiji_I2CLineID_DDC6 :
*scl = Fiji_I2C_DDC6CLK;
*sda = Fiji_I2C_DDC6DATA;
break;
case Fiji_I2CLineID_SCLSDA :
*scl = Fiji_I2C_SCL;
*sda = Fiji_I2C_SDA;
break;
case Fiji_I2CLineID_DDCVGA :
*scl = Fiji_I2C_DDCVGACLK;
*sda = Fiji_I2C_DDCVGADATA;
break;
default:
*scl = 0;
*sda = 0;
break;
}
}
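/* Example: passing Fiji_I2CLineID_DDC2 (0x91) returns *scl = Fiji_I2C_DDC2CLK (3)
 * and *sda = Fiji_I2C_DDC2DATA (2); unknown line IDs fall through to 0/0.
 */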
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
struct pp_advance_fan_control_parameters *fan_table=
&hwmgr->thermal_controller.advanceFanControlParameters;
uint8_t uc_scl, uc_sda;
/* TDP number of fraction bits are changed from 8 to 7 for Fiji
* as requested by SMC team
*/
dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
(uint16_t)(cac_dtp_table->usTDP * 128));
dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
(uint16_t)(cac_dtp_table->usTDP * 128));
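/* With 7 fractional bits the scale factor is 2^7 = 128, so e.g. a pptable
 * TDP of 100 W is encoded as 100 * 128 = 12800 before PP_HOST_TO_SMC_US is
 * applied.
 */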
PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
"Target Operating Temp is out of Range!",);
dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
dpm_table->GpuTjHyst = 8;
dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
/* The following are for new Fiji Multi-input fan/thermal control */
dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
cac_dtp_table->usTargetOperatingTemp * 256);
dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitHotspot * 256);
dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitLiquid1 * 256);
dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitLiquid2 * 256);
dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitVrVddc * 256);
dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitVrMvdd * 256);
dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
cac_dtp_table->usTemperatureLimitPlx * 256);
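/* The multiplications by 256 above put the integer degree limits into the
 * Q8.8 style scaling (cf. FIJI_Q88_FORMAT_CONVERSION_UNIT); e.g. a target
 * operating temperature of 90 is sent as 90 * 256 = 23040.
 */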
dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainEdge));
dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainHotspot));
dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainLiquid));
dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainVrVddc));
dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainPlx));
dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
scale_fan_gain_settings(fan_table->usFanGainHbm));
dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
dpm_table->Liquid_I2C_LineSCL = uc_scl;
dpm_table->Liquid_I2C_LineSDA = uc_sda;
get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
dpm_table->Vr_I2C_LineSCL = uc_scl;
dpm_table->Vr_I2C_LineSDA = uc_sda;
get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
dpm_table->Plx_I2C_LineSCL = uc_scl;
dpm_table->Plx_I2C_LineSDA = uc_sda;
return 0;
}
static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
data->power_tune_table.SviLoadLineTrimVddC = 3;
data->power_tune_table.SviLoadLineOffsetVddC = 0;
return 0;
}
static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
/* TDC number of fraction bits are changed from 8 to 7
* for Fiji as requested by SMC team
*/
tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
data->power_tune_table.TDC_VDDC_PkgLimit =
CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
return 0;
}
static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
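/* The dword read back at the TdcWaterfallCtl offset also carries the LPML
 * temperature bytes: bits 23:16 are unpacked into LPMLTemperatureMin,
 * bits 15:8 into LPMLTemperatureMax and bits 7:0 into Reserved, while
 * TdcWaterfallCtl itself is taken from the per-ASIC defaults below.
 */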
if (fiji_read_smc_sram_dword(hwmgr->smumgr,
fuse_table_offset +
offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
else {
data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
data->power_tune_table.LPMLTemperatureMin =
(uint8_t)((temp >> 16) & 0xff);
data->power_tune_table.LPMLTemperatureMax =
(uint8_t)((temp >> 8) & 0xff);
data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
}
return 0;
}
static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.LPMLTemperatureScaler[i] = 0;
return 0;
}
static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if( (hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity & (1 << 15)) ||
0 == hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity )
hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity = hwmgr->thermal_controller.
advanceFanControlParameters.usDefaultFanOutputSensitivity;
data->power_tune_table.FuzzyFan_PwmSetDelta =
PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
advanceFanControlParameters.usFanOutputSensitivity);
return 0;
}
static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.GnbLPML[i] = 0;
return 0;
}
static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
/* int i, min, max;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
min = max = pHiVID[0];
for (i = 0; i < 8; i++) {
if (0 != pHiVID[i]) {
if (min > pHiVID[i])
min = pHiVID[i];
if (max < pHiVID[i])
max = pHiVID[i];
}
if (0 != pLoVID[i]) {
if (min > pLoVID[i])
min = pLoVID[i];
if (max < pLoVID[i])
max = pLoVID[i];
}
}
PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
*/
return 0;
}
static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
data->power_tune_table.BapmVddCBaseLeakageHiSidd =
CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
data->power_tune_table.BapmVddCBaseLeakageLoSidd =
CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
return 0;
}
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
uint32_t pm_fuse_table_offset;
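/* The DW6..DW20 helpers below fill successive fields of power_tune_table
 * (SMU73_Discrete_PmFuses); once every field is populated, the whole table
 * is written to SMC SRAM at the PmFuseTable offset read from the firmware
 * header.
 */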
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (fiji_read_smc_sram_dword(hwmgr->smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to get pm_fuse_table_offset Failed!",
return -EINVAL);
/* DW6 */
if (fiji_populate_svi_load_line(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate SviLoadLine Failed!",
return -EINVAL);
/* DW7 */
if (fiji_populate_tdc_limit(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TDCLimit Failed!", return -EINVAL);
/* DW8 */
if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TdcWaterfallCtl, "
"LPMLTemperature Min and Max Failed!",
return -EINVAL);
/* DW9-DW12 */
if (0 != fiji_populate_temperature_scaler(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate LPMLTemperatureScaler Failed!",
return -EINVAL);
/* DW13-DW14 */
if(fiji_populate_fuzzy_fan(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate Fuzzy Fan Control parameters Failed!",
return -EINVAL);
/* DW15-DW18 */
if (fiji_populate_gnb_lpml(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
/* DW19 */
if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Min and Max Vid Failed!",
return -EINVAL);
/* DW20 */
if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
"Sidd Failed!", return -EINVAL);
if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
}
return 0;
}
int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
data->cac_enabled = (0 == smc_result) ? true : false;
}
return result;
}
int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC) && data->cac_enabled) {
int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_DisableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
data->cac_enabled = false;
}
return result;
}
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if(data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int smc_result;
int result = 0;
data->power_containment_features = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (data->enable_dte_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableDTE));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable DTE in SMC.", result = -1;);
if (0 == smc_result)
data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
}
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_TDCLimit;
}
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
struct phm_cac_tdp_table *cac_table =
table_info->cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
if (fiji_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
}
return result;
}
int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment) &&
data->power_containment_features) {
int smc_result;
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
result = smc_result);
}
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_DisableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
result = smc_result);
}
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
result = smc_result);
}
data->power_containment_features = 0;
}
return result;
}
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
int adjust_percent, target_tdp;
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
/* SMC requested that target_tdp to be 7 bit fraction in DPM table
* but message to be 8 bit fraction for messages
*/
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
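/* Example: usTDP = 100 W with a +10% adjustment gives
 * (100 + 10) * (100 * 256) / 100 = 28160, i.e. 110 W in the 8-bit-fraction
 * unit used for the OverDriveSetTargetTdp message.
 */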
result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
}

View File

@ -1,81 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef FIJI_POWERTUNE_H
#define FIJI_POWERTUNE_H
enum fiji_pt_config_reg_type {
FIJI_CONFIGREG_MMR = 0,
FIJI_CONFIGREG_SMC_IND,
FIJI_CONFIGREG_DIDT_IND,
FIJI_CONFIGREG_CACHE,
FIJI_CONFIGREG_MAX
};
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
struct fiji_pt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
enum fiji_pt_config_reg_type type;
};
struct fiji_pt_defaults
{
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
uint8_t TDC_MAWt;
uint8_t TdcWaterfallCtl;
uint8_t DTEAmbientTempBase;
};
void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
#endif /* FIJI_POWERTUNE_H */

View File

@ -1,62 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef FIJI_THERMAL_H
#define FIJI_THERMAL_H
#include "hwmgr.h"
#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
#define FIJI_THERMAL_LOW_ALERT_MASK 0x2
#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255
#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
#endif

View File

@ -36,13 +36,13 @@
#include "amd_acpi.h" #include "amd_acpi.h"
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr); static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
uint8_t convert_to_vid(uint16_t vddc) uint8_t convert_to_vid(uint16_t vddc)
{ {
@ -79,21 +79,32 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
iceland_hwmgr_init(hwmgr);
topaz_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
tonga_hwmgr_init(hwmgr);
tonga_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
PP_VBI_TIME_SUPPORT_MASK);
break;
case CHIP_FIJI:
fiji_hwmgr_init(hwmgr);
fiji_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
polaris10_hwmgr_init(hwmgr);
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
default:
return -EINVAL;
}
smu7_hwmgr_init(hwmgr);
break;
default:
return -EINVAL;
@ -388,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
PP_ASSERT_WITH_CODE(count <= max,
"Fatal error, can not set up single DPM table entries to exceed max number!",
);
dpm_table->count = count;
for (i = 0; i < max; i++)
dpm_table->count = count > max ? max : count;
for (i = 0; i < dpm_table->count; i++)
dpm_table->dpm_level[i].enabled = false;
return 0;
@ -713,3 +721,95 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
return ret;
}
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
/* power tune caps Assume disabled */
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
if (hwmgr->chip_id == CHIP_POLARIS11)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
return 0;
}
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
return 0;
}
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
return 0;
}
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EVV);
return 0;
}
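/* Note: with the per-ASIC hwmgr implementations removed, the helpers above
 * only configure platform caps; all VI dGPUs are then initialised through
 * the shared smu7_hwmgr_init() path selected in hwmgr_init().
 */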

View File

@ -1,119 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#include "hwmgr.h"
#include "iceland_clockpowergating.h"
#include "ppsmc.h"
#include "iceland_hwmgr.h"
int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
/* iceland does not have MM hardware block */
return 0;
}
static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
/* iceland does not have MM hardware block */
return 0;
}
static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
/* iceland does not have MM hardware block */
return 0;
}
static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr)
{
/* iceland does not have MM hardware block */
return 0;
}
int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum
PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
{
int ret = 0;
switch (block) {
case PHM_AsicBlock_UVD_MVC:
case PHM_AsicBlock_UVD:
case PHM_AsicBlock_UVD_HD:
case PHM_AsicBlock_UVD_SD:
if (gating == PHM_ClockGateSetting_StaticOff)
ret = iceland_phm_powerdown_uvd(hwmgr);
else
ret = iceland_phm_powerup_uvd(hwmgr);
break;
case PHM_AsicBlock_GFX:
default:
break;
}
return ret;
}
int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
iceland_phm_powerup_uvd(hwmgr);
iceland_phm_powerup_vce(hwmgr);
return 0;
}
int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
if (bgate) {
iceland_update_uvd_dpm(hwmgr, true);
iceland_phm_powerdown_uvd(hwmgr);
} else {
iceland_phm_powerup_uvd(hwmgr);
iceland_update_uvd_dpm(hwmgr, false);
}
return 0;
}
int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
if (bgate)
return iceland_phm_powerdown_vce(hwmgr);
else
return iceland_phm_powerup_vce(hwmgr);
return 0;
}
int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
/* iceland does not have MM hardware block */
return 0;
}

View File

@ -1,38 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#ifndef _ICELAND_CLOCK_POWER_GATING_H_
#define _ICELAND_CLOCK_POWER_GATING_H_
#include "iceland_hwmgr.h"
#include "pp_asicblocks.h"
extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */

View File

@ -1,41 +0,0 @@
#ifndef ICELAND_DYN_DEFAULTS_H
#define ICELAND_DYN_DEFAULTS_H
enum ICELANDdpm_TrendDetection
{
ICELANDdpm_TrendDetection_AUTO,
ICELANDdpm_TrendDetection_UP,
ICELANDdpm_TrendDetection_DOWN
};
typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200
#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define PPICELAND_REFERENCEDIVIDER_DFLT 4
#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687
#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035
#define PPICELAND_CGULVCONTROL_DFLT 0x00007450
#define PPICELAND_TARGETACTIVITY_DFLT 30
#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10
#endif

File diff suppressed because it is too large

View File

@ -1,424 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#ifndef ICELAND_HWMGR_H
#define ICELAND_HWMGR_H
#include "hwmgr.h"
#include "ppatomctrl.h"
#include "ppinterrupt.h"
#include "ppsmc.h"
#include "iceland_powertune.h"
#include "pp_endian.h"
#include "smu71_discrete.h"
#define ICELAND_MAX_HARDWARE_POWERLEVELS 2
#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
struct iceland_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
struct _phw_iceland_bacos {
uint32_t best_match;
uint32_t baco_flags;
struct iceland_performance_level performance_level;
};
typedef struct _phw_iceland_bacos phw_iceland_bacos;
struct _phw_iceland_uvd_clocks {
uint32_t VCLK;
uint32_t DCLK;
};
typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks;
struct _phw_iceland_vce_clocks {
uint32_t EVCLK;
uint32_t ECCLK;
};
typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks;
struct iceland_power_state {
uint32_t magic;
phw_iceland_uvd_clocks uvd_clocks;
phw_iceland_vce_clocks vce_clocks;
uint32_t sam_clk;
uint32_t acp_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS];
};
struct _phw_iceland_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level;
#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define ICELAND_MINIMUM_ENGINE_CLOCK 5000
struct iceland_single_dpm_table {
uint32_t count;
phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
struct iceland_dpm_table {
struct iceland_single_dpm_table sclk_table;
struct iceland_single_dpm_table mclk_table;
struct iceland_single_dpm_table pcie_speed_table;
struct iceland_single_dpm_table vddc_table;
struct iceland_single_dpm_table vdd_gfx_table;
struct iceland_single_dpm_table vdd_ci_table;
struct iceland_single_dpm_table mvdd_table;
};
typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table;
struct _phw_iceland_clock_regisiters {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
uint32_t vCG_SPLL_FUNC_CNTL_4;
uint32_t vCG_SPLL_SPREAD_SPECTRUM;
uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
uint32_t vDLL_CNTL;
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_DQ_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
uint32_t vMPLL_SS1;
uint32_t vMPLL_SS2;
};
typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers;
struct _phw_iceland_voltage_smio_registers {
uint32_t vs0_vid_lower_smio_cntl;
};
typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers;
struct _phw_iceland_mc_reg_entry {
uint32_t mclk_max;
uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry;
struct _phw_iceland_mc_reg_table {
uint8_t last; /* number of registers*/
uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table;
#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
/*Ultra Low Voltage parameter structure */
struct phw_iceland_ulv_parm{
bool ulv_supported;
uint32_t ch_ulv_parameter;
uint32_t ulv_volt_change_delay;
struct iceland_performance_level ulv_power_level;
};
#define ICELAND_MAX_LEAKAGE_COUNT 8
struct phw_iceland_leakage_voltage {
uint16_t count;
uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT];
uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT];
};
struct _phw_iceland_display_timing {
uint32_t min_clock_insr;
uint32_t num_existing_displays;
};
typedef struct _phw_iceland_display_timing phw_iceland_display_timing;
struct phw_iceland_thermal_temperature_setting
{
long temperature_low;
long temperature_high;
long temperature_shutdown;
};
struct _phw_iceland_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
uint32_t samu_dpm_enable_mask;
uint32_t sclk_dpm_enable_mask;
uint32_t mclk_dpm_enable_mask;
uint32_t pcie_dpm_enable_mask;
};
typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask;
struct _phw_iceland_pcie_perf_range {
uint16_t max;
uint16_t min;
};
typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range;
struct _phw_iceland_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
uint16_t vddgfx_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state;
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
/* We need to review which fields are needed. */
/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
struct iceland_hwmgr {
struct iceland_dpm_table dpm_table;
struct iceland_dpm_table golden_dpm_table;
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
uint32_t voting_rights_clients3;
uint32_t voting_rights_clients4;
uint32_t voting_rights_clients5;
uint32_t voting_rights_clients6;
uint32_t voting_rights_clients7;
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
uint32_t vdd_gfx_control;
uint32_t vddc_vddci_delta;
uint32_t vddc_vddgfx_delta;
struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
struct pp_interrupt_registration_info smc_to_host_interrupt_info;
uint32_t active_auto_throttle_sources;
struct pp_interrupt_registration_info external_throttle_interrupt;
irq_handler_func_t external_throttle_callback;
void *external_throttle_context;
struct pp_interrupt_registration_info ctf_interrupt_info;
irq_handler_func_t ctf_callback;
void *ctf_context;
phw_iceland_clock_registers clock_registers;
phw_iceland_voltage_smio_registers voltage_smio_registers;
bool is_memory_GDDR5;
uint16_t acpi_vddc;
bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
uint32_t mvdd_control;
uint32_t vddc_mask_low;
uint32_t mvdd_mask_low;
uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
uint16_t min_vddc_in_pp_table;
uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
uint16_t min_vddci_in_pp_table;
uint32_t mclk_strobe_mode_threshold;
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edc_wr_enable_threshold;
bool is_uvd_enabled;
bool is_xdma_enabled;
phw_iceland_vbios_boot_state vbios_boot_state;
bool battery_state;
bool is_tlu_enabled;
bool pcie_performance_request;
/* -------------- SMC SRAM Address of firmware header tables ----------------*/
uint32_t sram_end; /* The first address after the SMC SRAM. */
uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
uint32_t ulv_settings_start;
SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
SMU71_Discrete_MCRegisters mc_reg_table;
SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
/* -------------- Stuff originally coming from Evergreen --------------------*/
phw_iceland_mc_reg_table iceland_mc_reg_table;
uint32_t vdd_ci_control;
pp_atomctrl_voltage_table vddc_voltage_table;
pp_atomctrl_voltage_table vddci_voltage_table;
pp_atomctrl_voltage_table vddgfx_voltage_table;
pp_atomctrl_voltage_table mvdd_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
uint32_t gpio_debug;
uint32_t mc_micro_code_feature;
uint32_t highest_mclk;
uint16_t acpi_vdd_ci;
uint8_t mvdd_high_index;
uint8_t mvdd_low_index;
bool dll_defaule_on;
bool performance_request_registered;
/* ----------------- Low Power Features ---------------------*/
phw_iceland_bacos bacos;
struct phw_iceland_ulv_parm ulv;
/* ----------------- CAC Stuff ---------------------*/
uint32_t cac_table_start;
bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
bool cac_enabled;
/* ----------------- DPM2 Parameters ---------------------*/
uint32_t power_containment_features;
bool enable_bapm_feature;
bool enable_dte_feature;
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
struct iceland_pt_defaults *power_tune_defaults;
SMU71_Discrete_PmFuses power_tune_table;
uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
/* ----------------- Phase Shedding ---------------------*/
bool vddc_phase_shed_control;
/* --------------------- DI/DT --------------------------*/
phw_iceland_display_timing display_timing;
/* --------- ReadRegistry data for memory and engine clock margins ---- */
uint32_t engine_clock_data;
uint32_t memory_clock_data;
/* -------- Thermal Temperature Setting --------------*/
struct phw_iceland_thermal_temperature_setting thermal_temp_setting;
phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
/* used to store the previous dal min sclock */
uint32_t min_engine_clocks;
phw_iceland_pcie_perf_range pcie_gen_performance;
phw_iceland_pcie_perf_range pcie_lane_performance;
phw_iceland_pcie_perf_range pcie_gen_power_saving;
phw_iceland_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
/* percentage value from 0-100, default 50 */
uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target;
uint32_t low_sclk_interrupt_threshold;
uint32_t last_mclk_dpm_enable_mask;
bool uvd_enabled;
uint32_t pcc_monitor_enabled;
/* --------- Power Gating States ------------*/
bool uvd_power_gated; /* 1: gated, 0:not gated */
bool vce_power_gated; /* 1: gated, 0:not gated */
bool samu_power_gated; /* 1: gated, 0:not gated */
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
/* soft pptable for re-uploading into smu */
void *soft_pp_table;
};
typedef struct iceland_hwmgr iceland_hwmgr;
int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr);
int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr);
#define ICELAND_DPM2_NEAR_TDP_DEC 10
#define ICELAND_DPM2_ABOVE_SAFE_INC 5
#define ICELAND_DPM2_BELOW_SAFE_INC 20
/*
* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size
* is 128, then this value should be Log2(128) = 7.
*/
#define ICELAND_DPM2_LTA_WINDOW_SIZE 7
#define ICELAND_DPM2_LTS_TRUNCATE 0
#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100
#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF
#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF
#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50
#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12
#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
#define ICELAND_VOLTAGE_CONTROL_NONE 0x0
#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1
#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2
/* convert to Q8.8 format for firmware */
#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256
#define ICELAND_UNUSED_GPIO_PIN 0x7F
#endif

View File

@ -1,490 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#include "amdgpu.h"
#include "hwmgr.h"
#include "smumgr.h"
#include "iceland_hwmgr.h"
#include "iceland_powertune.h"
#include "iceland_smumgr.h"
#include "smu71_discrete.h"
#include "smu71.h"
#include "pp_debug.h"
#include "cgs_common.h"
#include "pp_endian.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
struct iceland_pt_defaults defaults_iceland =
{
/*
* sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
*/
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
/* 35W - XT, XTL */
struct iceland_pt_defaults defaults_icelandxt =
{
/*
* sviLoadLineEn, SviLoadLineVddC,
* TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
* TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
* BAPM_TEMP_GRADIENT
*/
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
{ 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
/* 25W - PRO, LE */
struct iceland_pt_defaults defaults_icelandpro =
{
/*
* sviLoadLineEn, SviLoadLineVddC,
* TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
* TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
* BAPM_TEMP_GRADIENT
*/
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
{ 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
{ 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
};
void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
uint32_t tmp = 0;
struct cgs_system_info sys_info = {0};
uint32_t pdev_id;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
cgs_query_system_info(hwmgr->device, &sys_info);
pdev_id = (uint32_t)sys_info.value;
switch (pdev_id) {
case DEVICE_ID_VI_ICELAND_M_6900:
case DEVICE_ID_VI_ICELAND_M_6903:
data->power_tune_defaults = &defaults_icelandxt;
break;
case DEVICE_ID_VI_ICELAND_M_6901:
case DEVICE_ID_VI_ICELAND_M_6902:
data->power_tune_defaults = &defaults_icelandpro;
break;
default:
/* TODO: need to assign valid defaults */
data->power_tune_defaults = &defaults_iceland;
pr_warning("Unknown V.I. Device ID.\n");
break;
}
/* Assume disabled */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
data->ul_dte_tj_offset = tmp;
if (!tmp) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
data->fast_watermark_threshold = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
tmp = 1;
data->enable_dte_feature = tmp ? false : true;
data->enable_tdc_limit_feature = tmp ? true : false;
data->enable_pkg_pwr_tracking_feature = tmp ? true : false;
}
}
}
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
struct iceland_pt_defaults *defaults = data->power_tune_defaults;
SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
uint16_t *def1, *def2;
int i, j, k;
/*
* The number of TDP fraction bits is changed from 8 to 7 for Iceland,
* as requested by the SMC team.
*/
dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset;
dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
dpm_table->GpuTjHyst = 8;
dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
/* The following are for new Iceland Multi-input fan/thermal control */
if(NULL != ppm) {
dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
} else {
dpm_table->PPM_PkgPwrLimit = 0;
dpm_table->PPM_TemperatureLimit = 0;
}
CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
def1 = defaults->bapmti_r;
def2 = defaults->bapmti_rc;
for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
for (j = 0; j < SMU71_DTE_SOURCES; j++) {
for (k = 0; k < SMU71_DTE_SINKS; k++) {
dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
def1++;
def2++;
}
}
}
return 0;
}
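The TDP and package-power fields above are handed to the SMC as 8.8 fixed-point numbers; the * 256 is the same scale as ICELAND_Q88_FORMAT_CONVERSION_UNIT defined earlier, and the driver additionally byte-swaps the result with PP_HOST_TO_SMC_US before it lands in the SMC table. A minimal, self-contained sketch of that encoding, assuming the pptable TDP value is in whole watts (that assumption is not taken from this file):
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: encode a power limit the way the table population code
 * above does, i.e. as an 8.8 fixed-point value (scale factor 256).  The "65"
 * stands in for cac_dtp_table->usTDP and is assumed to be whole watts. */
int main(void)
{
	uint16_t usTDP = 65;                         /* hypothetical pptable value */
	uint16_t default_tdp = (uint16_t)(usTDP * 256);

	printf("DefaultTdp = 0x%04x (%u.%02u)\n", (unsigned)default_tdp,
	       (unsigned)(default_tdp / 256), (unsigned)((default_tdp % 256) * 100 / 256));
	return 0;
}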
static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
data->power_tune_table.SviLoadLineTrimVddC = 3;
data->power_tune_table.SviLoadLineOffsetVddC = 0;
return 0;
}
static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
/* The number of TDC fraction bits is changed from 8 to 7
 * for Iceland, as requested by the SMC team.
*/
tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
data->power_tune_table.TDC_VDDC_PkgLimit =
CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
defaults->tdc_vddc_throttle_release_limit_perc;
data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
return 0;
}
static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
const struct iceland_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
if (iceland_read_smc_sram_dword(hwmgr->smumgr,
fuse_table_offset +
offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
else
data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
return 0;
}
static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 8; i++)
data->power_tune_table.GnbLPML[i] = 0;
return 0;
}
static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
data->power_tune_table.BapmVddCBaseLeakageHiSidd =
CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
data->power_tune_table.BapmVddCBaseLeakageLoSidd =
CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
return 0;
}
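A small sketch of the HiSidd/LoSidd conversion above with a made-up leakage value. Because the division by 100 happens before the multiplication by 256, the integer arithmetic truncates to whole percent first; the second expression shows the full-precision ordering for comparison:
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: reproduce the BapmVddCBaseLeakage conversion with a
 * hypothetical usHighCACLeakage value.  Dividing by 100 first (as the
 * function above does) rounds down before scaling to 8.8 fixed point. */
int main(void)
{
	uint16_t usHighCACLeakage = 150;   /* hypothetical pptable value */
	uint16_t divide_first   = (uint16_t)(usHighCACLeakage / 100 * 256);
	uint16_t multiply_first = (uint16_t)((uint32_t)usHighCACLeakage * 256 / 100);

	printf("divide-first: %u, multiply-first: %u\n",
	       (unsigned)divide_first, (unsigned)multiply_first);   /* 256 vs 384 */
	return 0;
}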
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (iceland_read_smc_sram_dword(hwmgr->smumgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to get pm_fuse_table_offset Failed!",
return -EINVAL);
/* DW0 - DW3 */
if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate bapm vddc vid Failed!",
return -EINVAL);
/* DW4 - DW5 */
if (iceland_populate_vddc_vid(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate vddc vid Failed!",
return -EINVAL);
/* DW6 */
if (iceland_populate_svi_load_line(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate SviLoadLine Failed!",
return -EINVAL);
/* DW7 */
if (iceland_populate_tdc_limit(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TDCLimit Failed!", return -EINVAL);
/* DW8 */
if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TdcWaterfallCtl, "
"LPMLTemperature Min and Max Failed!",
return -EINVAL);
/* DW9-DW12 */
if (0 != iceland_populate_temperature_scaler(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate LPMLTemperatureScaler Failed!",
return -EINVAL);
/* DW13-DW16 */
if (iceland_populate_gnb_lpml(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
/* DW17 */
if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Min and Max Vid Failed!",
return -EINVAL);
/* DW18 */
if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
return -EINVAL);
if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
sizeof(struct SMU71_Discrete_PmFuses), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
}
return 0;
}
int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
data->cac_enabled = (0 == smc_result) ? true : false;
}
return result;
}
static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
if(data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
int iceland_enable_power_containment(struct pp_hwmgr *hwmgr)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table;
int smc_result;
int result = 0;
uint32_t is_asic_kicker;
data->power_containment_features = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2);
is_asic_kicker = (is_asic_kicker >> 12) & 0x01;
if (data->enable_bapm_feature &&
(!is_asic_kicker ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableDTE));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable BAPM in SMC.", result = -1;);
if (0 == smc_result)
data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
}
if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))
dpm_table->DTEMode = 2;
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_TDCLimit;
}
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
struct phm_cac_tdp_table *cac_table =
hwmgr->dyn_state.cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
if (iceland_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
}
return result;
}
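The containment path above keys several decisions off a single strap bit and then accumulates POWERCONTAINMENT_FEATURE_* flags into a mask. A self-contained sketch of that pattern, using a made-up strap value instead of a real register read and local stand-ins for the feature defines:
#include <stdint.h>
#include <stdio.h>

#define FEATURE_BAPM        0x00000001   /* stand-ins for the            */
#define FEATURE_TDCLimit    0x00000002   /* POWERCONTAINMENT_FEATURE_*   */
#define FEATURE_PkgPwrLimit 0x00000004   /* defines in iceland_powertune.h */

int main(void)
{
	uint32_t strap2 = 0x00001000;                 /* hypothetical CC_BIF_BX_STRAP2 value */
	uint32_t is_asic_kicker = (strap2 >> 12) & 0x01;
	uint32_t features = 0;

	if (!is_asic_kicker)
		features |= FEATURE_BAPM;             /* BAPM/DTE path taken on non-kicker parts */
	features |= FEATURE_TDCLimit | FEATURE_PkgPwrLimit;

	printf("is_asic_kicker=%u features=0x%x\n", is_asic_kicker, features);
	return 0;
}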
int iceland_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
int adjust_percent, target_tdp;
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
/*
* The SMC requested that target_tdp be a 7-bit fraction in the DPM table,
* but an 8-bit fraction in messages.
*/
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
}
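A worked example of the target_tdp arithmetic above, using a hypothetical 100 W TDP and a +10 % adjustment; the result is the 8.8 fixed-point value sent with PPSMC_MSG_OverDriveSetTargetTdp:
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: reproduce the target_tdp computation for a made-up
 * board.  us_tdp stands in for cac_table->usTDP and is assumed to be in
 * whole watts; adjust_percent is the signed TDP adjustment. */
int main(void)
{
	int us_tdp = 100;
	int adjust_percent = 10;
	int target_tdp = ((100 + adjust_percent) * (us_tdp * 256)) / 100;

	printf("target_tdp = %d (0x%x), i.e. %d.%02d W in 8.8 fixed point\n",
	       target_tdp, target_tdp, target_tdp / 256,
	       (target_tdp % 256) * 100 / 256);
	return 0;
}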


@ -1,74 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#ifndef ICELAND_POWERTUNE_H
#define ICELAND_POWERTUNE_H
#include "smu71.h"
enum iceland_pt_config_reg_type {
ICELAND_CONFIGREG_MMR = 0,
ICELAND_CONFIGREG_SMC_IND,
ICELAND_CONFIGREG_DIDT_IND,
ICELAND_CONFIGREG_CACHE,
ICELAND_CONFIGREG_MAX
};
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
struct iceland_pt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
enum iceland_pt_config_reg_type type;
};
struct iceland_pt_defaults
{
uint8_t svi_load_line_en;
uint8_t svi_load_line_vddc;
uint8_t tdc_vddc_throttle_release_limit_perc;
uint8_t tdc_mawt;
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
uint32_t bamp_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr);
int iceland_enable_power_containment(struct pp_hwmgr *hwmgr);
int iceland_power_control_set_level(struct pp_hwmgr *hwmgr);
#endif /* ICELAND_POWERTUNE_H */


@ -1,595 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#include <asm/div64.h>
#include "iceland_thermal.h"
#include "iceland_hwmgr.h"
#include "iceland_smumgr.h"
#include "atombios.h"
#include "ppsmc.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"
/**
* Get Fan Speed Control Parameters.
* @param hwmgr the address of the powerplay hardware manager.
* @param fan_speed_info the address of the structure where the result is to be placed.
* @exception Always succeeds except if we cannot zero out the output structure.
*/
int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
} else {
fan_speed_info->min_rpm = 0;
fan_speed_info->max_rpm = 0;
}
return 0;
}
/**
* Get Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed pointer to the variable where the result is to be placed.
* @exception Fails if the 100% setting appears to be 0.
*/
int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (0 == duty100)
return -EINVAL;
tmp64 = (uint64_t)duty * 100;
do_div(tmp64, duty100);
*speed = (uint32_t)tmp64;
if (*speed > 100)
*speed = 100;
return 0;
}
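The percentage readback above is simply duty * 100 / duty100, done through do_div() because the intermediate product is held in a u64. A userspace sketch of the same arithmetic with sample register values:
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: convert a raw FDO_PWM_DUTY reading into a percentage,
 * as the function above does, using made-up register values. */
int main(void)
{
	uint32_t duty100 = 255;     /* FMAX_DUTY100: duty counts meaning 100% */
	uint32_t duty = 128;        /* current FDO_PWM_DUTY reading           */
	uint64_t tmp64 = (uint64_t)duty * 100;
	uint32_t speed;

	if (duty100 == 0)
		return 1;           /* the driver returns -EINVAL here */
	speed = (uint32_t)(tmp64 / duty100);
	if (speed > 100)
		speed = 100;
	printf("fan speed = %u%%\n", speed);   /* 128/255 -> 50 */
	return 0;
}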
/**
* Get Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed pointer to the variable where the result is to be placed.
* @exception Returns not supported if no fan is found or if pulses per revolution are not set
*/
int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
return 0;
}
/**
* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
* @param hwmgr the address of the powerplay hardware manager.
* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM.
* @exception Should always succeed.
*/
int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
return 0;
}
/**
* Reset Fan Speed Control to default mode.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
hwmgr->fan_ctrl_is_in_default_mode = true;
}
return 0;
}
int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
}
int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
}
/**
* Set Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -EINVAL;
if (speed > 100) {
pr_warning("Cannot set more than 100%% duty cycle. Setting it to 100.\n");
speed = 100;
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
iceland_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
if (0 == duty100)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reset Fan Speed to default.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (0 == result)
result = iceland_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = iceland_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed the fan speed in RPM (between min and max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
return 0;
}
/**
* Reads the remote temperature from the SIslands thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
/*
* Bit 9 means the reading is lower than the lowest usable
* value.
*/
if (0 != (0x200 & temp))
temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING;
else
temp = (temp & 0x1ff);
temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
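A sketch of the CTF_TEMP decoding above: the field is 9 bits wide, bit 9 flags an under-range reading, and the result is scaled into driver temperature units. The assumption that PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (millidegrees) is mine, not something stated in this file:
#include <stdio.h>

#define MAX_TEMP_READING   255   /* stand-in for ICELAND_THERMAL_MAXIMUM_TEMP_READING */
#define UNITS_PER_C        1000  /* assumed value of PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

/* Illustrative only: decode a raw CTF_TEMP field the way the function above
 * does.  Bit 9 set means "below the lowest usable value", in which case the
 * driver substitutes the maximum reading. */
static int decode_ctf_temp(unsigned int raw)
{
	int temp;

	if (raw & 0x200)
		temp = MAX_TEMP_READING;
	else
		temp = raw & 0x1ff;
	return temp * UNITS_PER_C;
}

int main(void)
{
	printf("raw 0x03c -> %d\n", decode_ctf_temp(0x03c));  /* 60 C reading  */
	printf("raw 0x23c -> %d\n", decode_ctf_temp(0x23c));  /* under-range   */
	return 0;
}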
/**
* Set the requested temperature range for high and low alert signals
*
* @param hwmgr The address of the hardware manager.
* @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
{
uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
low = low_temp;
if (high > high_temp)
high = high_temp;
if (low > high)
return -EINVAL;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
return 0;
}
/**
* Programs thermal controller one-time setting registers
*
* @param hwmgr The address of the hardware manager.
*/
static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_CTRL, EDGE_PER_REV,
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
return 0;
}
/**
* Enable thermal alerts on the RV770 thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
}
/**
* Disable thermal alerts on the RV770 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
}
/**
* Uninitialize the thermal controller.
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = iceland_thermal_disable_alert(hwmgr);
if (result)
pr_warning("Failed to disable thermal alerts!\n");
if (hwmgr->thermal_controller.fanInfo.bNoFan)
iceland_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
uint32_t reference_clock;
int res;
uint64_t tmp64;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
return 0;
if (0 == data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
if (0 == duty100) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
do_div(tmp64, 10000);
fdo_min = (uint16_t)tmp64;
t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
fan_table.FdoMin = cpu_to_be16(fdo_min);
fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
fan_table.HystUp = cpu_to_be16(1);
fan_table.HystSlope = cpu_to_be16(1);
fan_table.TempRespLim = cpu_to_be16(5);
reference_clock = iceland_get_xclk(hwmgr);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
//fan_table.FanControl_GL_Flag = 1;
res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
/* TODO: for some device IDs (0x692b), sending this message returns an invalid command.
if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
if (0 != res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
*/
return 0;
}
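The Slope1/Slope2 values above come from the three (temperature, PWM) calibration points in the pptable, and the "+ 50 ... / 100" pattern rounds an integer division. A sketch with hypothetical calibration data, assuming temperatures are in 0.01 degC and PWM limits in 0.01 % (the usPWMMin / 10000 scaling earlier in the function suggests that, but it is an assumption):
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: recompute Slope1/Slope2 and FdoMin from made-up fan
 * calibration points, mirroring the arithmetic in
 * tf_iceland_thermal_setup_fan_table. */
int main(void)
{
	uint32_t duty100 = 255;                                     /* FMAX_DUTY100 */
	uint32_t t_min = 4000, t_med = 6000, t_high = 8000;         /* 40/60/80 C in 0.01 C */
	uint32_t pwm_min = 3000, pwm_med = 6000, pwm_high = 10000;  /* 30/60/100 % in 0.01 % */
	uint32_t t_diff1 = t_med - t_min, t_diff2 = t_high - t_med;
	uint32_t pwm_diff1 = pwm_med - pwm_min, pwm_diff2 = pwm_high - pwm_med;
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	uint16_t slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	printf("slope1=%u slope2=%u fdo_min=%u\n", (unsigned)slope1, (unsigned)slope2,
	       (unsigned)((uint64_t)pwm_min * duty100 / 10000));
	return 0;
}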
/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
iceland_fan_ctrl_start_smc_fan_control(hwmgr);
iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
}
/**
* Set temperature range for high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
if (range == NULL)
return -EINVAL;
return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
* Programs one-time setting registers
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input,
void *output, void *storage, int result)
{
return iceland_thermal_initialize(hwmgr);
}
/**
* Enable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from enable alert routine
*/
static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return iceland_thermal_enable_alert(hwmgr);
}
/**
* Disable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from disable alert routine
*/
static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
return iceland_thermal_disable_alert(hwmgr);
}
static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = {
{ NULL, tf_iceland_thermal_initialize },
{ NULL, tf_iceland_thermal_set_temperature_range },
{ NULL, tf_iceland_thermal_enable_alert },
/*
* We should restrict performance levels to low before we halt
* the SMC. On the other hand we are still in boot state when
* we do this so it would be pointless. If this assumption
* changes we have to revisit this table.
*/
{ NULL, tf_iceland_thermal_setup_fan_table},
{ NULL, tf_iceland_thermal_start_smc_fan_control},
{ NULL, NULL }
};
static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
iceland_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = {
{ NULL, tf_iceland_thermal_disable_alert},
{ NULL, tf_iceland_thermal_set_temperature_range},
{ NULL, tf_iceland_thermal_enable_alert},
{ NULL, NULL }
};
static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
iceland_thermal_set_temperature_range_master_list
};
int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
iceland_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
/**
* Initializes the thermal controller related functions in the Hardware Manager structure.
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
if (0 == result) {
result = phm_construct_table(hwmgr,
&iceland_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (0 != result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
}
if (0 == result)
hwmgr->fan_ctrl_is_in_default_mode = true;
return result;
}


@ -1,58 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui <ray.huang@amd.com>
*
*/
#ifndef ICELAND_THERMAL_H
#define ICELAND_THERMAL_H
#include "hwmgr.h"
#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1
#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2
#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256
#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255
#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0
#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif


@ -1,62 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef POLARIS10_DYN_DEFAULTS_H
#define POLARIS10_DYN_DEFAULTS_H
enum Polaris10dpm_TrendDetection {
Polaris10Adpm_TrendDetection_AUTO,
Polaris10Adpm_TrendDetection_UP,
Polaris10Adpm_TrendDetection_DOWN
};
typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
/* We need to fill in the default values */
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4
#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687
#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10
#endif

File diff suppressed because it is too large.


@ -1,716 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <asm/div64.h>
#include "polaris10_thermal.h"
#include "polaris10_hwmgr.h"
#include "polaris10_smumgr.h"
#include "polaris10_ppsmc.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
} else {
fan_speed_info->min_rpm = 0;
fan_speed_info->max_rpm = 0;
}
return 0;
}
int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)duty * 100;
do_div(tmp64, duty100);
*speed = (uint32_t)tmp64;
if (*speed > 100)
*speed = 100;
return 0;
}
int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
return 0;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
if (tach_period == 0)
return -EINVAL;
crystal_clock_freq = tonga_get_xclk(hwmgr);
*speed = 60 * crystal_clock_freq * 10000 / tach_period;
return 0;
}
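The RPM readback above converts the measured TACH_PERIOD against the reference clock returned by tonga_get_xclk(). A sketch of the conversion; the assumption that the xclk value is in 10 kHz units (so the * 10000 yields Hz) is mine, not stated here:
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: convert a TACH_PERIOD register value to RPM the way the
 * readback above does.  Both register values below are made up. */
int main(void)
{
	uint32_t crystal_clock_freq = 2500;   /* hypothetical xclk, 2500 * 10 kHz = 25 MHz */
	uint32_t tach_period = 600000;        /* hypothetical TACH_PERIOD reading */
	uint32_t rpm;

	if (tach_period == 0)
		return 1;                     /* the driver returns -EINVAL here */
	rpm = 60 * crystal_clock_freq * 10000 / tach_period;
	printf("fan speed = %u RPM\n", rpm);  /* 2500 RPM for these numbers */
	return 0;
}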
/**
* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
* @param hwmgr the address of the powerplay hardware manager.
* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM.
* @exception Should always succeed.
*/
int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN, 0);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE, mode);
return 0;
}
/**
* Reset Fan Speed Control to default mode.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TMIN, hwmgr->tmin);
hwmgr->fan_ctrl_is_in_default_mode = true;
}
return 0;
}
static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM);
else
hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanPWM);
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
}
if (!result && hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature)
result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
return result;
}
int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
/**
* Set Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (speed > 100)
speed = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reset Fan Speed to default.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = polaris10_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed the fan speed in RPM (between min and max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = tonga_get_xclk(hwmgr);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reads the remote temperature from the SIslands thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_STATUS, CTF_TEMP);
/* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200)
temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
else
temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
/**
* Set the requested temperature range for high and low alert signals
*
* @param hwmgr The address of the hardware manager.
* @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
uint32_t low_temp, uint32_t high_temp)
{
uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
low = low_temp;
if (high > high_temp)
high = high_temp;
if (low > high)
return -EINVAL;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, DIG_THERM_INTH,
(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, DIG_THERM_INTL,
(low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_CTRL, DIG_THERM_DPM,
(high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
return 0;
}
/**
* Programs thermal controller one-time setting registers
*
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_CTRL, EDGE_PER_REV,
hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution - 1);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
return 0;
}
/**
* Enable thermal alerts on the RV770 thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
/**
* Disable thermal alerts on the RV770 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
/**
* Uninitialize the thermal controller.
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = polaris10_thermal_disable_alert(hwmgr);
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
polaris10_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
uint32_t reference_clock;
int res;
uint64_t tmp64;
if (data->fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
fdo_min = (uint16_t)tmp64;
t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.TempMin = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMin) / 100);
fan_table.TempMed = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMed) / 100);
fan_table.TempMax = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMax) / 100);
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
fan_table.FdoMin = cpu_to_be16(fdo_min);
fan_table.HystDown = cpu_to_be16(hwmgr->
thermal_controller.advanceFanControlParameters.ucTHyst);
fan_table.HystUp = cpu_to_be16(1);
fan_table.HystSlope = cpu_to_be16(1);
fan_table.TempRespLim = cpu_to_be16(5);
reference_clock = tonga_get_xclk(hwmgr);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
reference_clock) / 1600);
fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_CTRL, TEMP_SEL);
res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
data->sram_end);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
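For reference, the fan-table maths above boils down to simple fixed-point interpolation. The standalone sketch below reruns the fdo_min and slope calculations with invented pptable values (usPWMMin, usTMed and friends are placeholders, not real platform data) and only mirrors the integer arithmetic, not the SMC upload.
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	/* Illustrative inputs: temperatures and PWM values in hundredths,
	 * as in the pptable; none of these numbers come from real hardware. */
	uint32_t duty100  = 255;	/* CG_FDO_CTRL1.FMAX_DUTY100 */
	uint32_t usPWMMin = 1500, usPWMMed = 4000, usPWMHigh = 8000;
	uint32_t usTMin   = 4500, usTMed   = 6500, usTHigh   = 8500;
	/* fdo_min: minimum PWM expressed as a fraction of duty100. */
	uint64_t tmp64 = (uint64_t)usPWMMin * duty100;
	uint16_t fdo_min = (uint16_t)(tmp64 / 10000);
	/* Slopes between the (temperature, PWM) breakpoints, rounded. */
	uint32_t t_diff1 = usTMed - usTMin, t_diff2 = usTHigh - usTMed;
	uint32_t pwm_diff1 = usPWMMed - usPWMMin, pwm_diff2 = usPWMHigh - usPWMMed;
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	uint16_t slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
	printf("fdo_min=%u slope1=%u slope2=%u\n",
	       (unsigned)fdo_min, (unsigned)slope1, (unsigned)slope2);
	return 0;
}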
/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return always 0
*/
static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
}
/**
* Set temperature range for high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
if (range == NULL)
return -EINVAL;
return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
* Programs one-time setting registers
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_initialize(hwmgr);
}
/**
* Enable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from enable alert routine
*/
int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_enable_alert(hwmgr);
}
/**
* Disable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from disable alert routine
*/
static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
return polaris10_thermal_disable_alert(hwmgr);
}
static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return 0;
ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
0 : -1;
if (!ret)
/* If this param is not changed, this function could fire unnecessarily */
smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
return ret;
}
static const struct phm_master_table_item
polaris10_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_polaris10_thermal_initialize},
{NULL, tf_polaris10_thermal_set_temperature_range},
{NULL, tf_polaris10_thermal_enable_alert},
{NULL, tf_polaris10_thermal_avfs_enable},
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
{NULL, tf_polaris10_thermal_setup_fan_table},
{NULL, tf_polaris10_thermal_start_smc_fan_control},
{NULL, NULL}
};
static const struct phm_master_table_header
polaris10_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
polaris10_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item
polaris10_thermal_set_temperature_range_master_list[] = {
{NULL, tf_polaris10_thermal_disable_alert},
{NULL, tf_polaris10_thermal_set_temperature_range},
{NULL, tf_polaris10_thermal_enable_alert},
{NULL, NULL}
};
static const struct phm_master_table_header
polaris10_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
polaris10_thermal_set_temperature_range_master_list
};
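The master lists above are NULL-terminated arrays of {check, task} pairs that the phm layer dispatches in order; the checks are left NULL here, so every task runs. The fragment below is a schematic, user-space restatement of that dispatch pattern, with all demo_* names invented for illustration.
#include <stdio.h>
struct demo_hwmgr { int dummy; };
typedef int (*demo_check)(struct demo_hwmgr *hw);
typedef int (*demo_task)(struct demo_hwmgr *hw, void *in, void *out,
			 void *storage, int last_result);
/* Mirrors the {check, task} layout of the master lists above; a NULL
 * task terminates the list, a NULL check means "always run". */
struct demo_table_item {
	demo_check check;
	demo_task  task;
};
static int demo_task_a(struct demo_hwmgr *hw, void *in, void *out,
		       void *st, int last) { puts("task a"); return 0; }
static int demo_task_b(struct demo_hwmgr *hw, void *in, void *out,
		       void *st, int last) { puts("task b"); return 0; }
static int demo_dispatch(struct demo_hwmgr *hw,
			 const struct demo_table_item *item,
			 void *in, void *out)
{
	int result = 0;
	for (; item->task != NULL; item++) {
		if (item->check && !item->check(hw))
			continue;
		result = item->task(hw, in, out, NULL, result);
	}
	return result;
}
int main(void)
{
	static const struct demo_table_item list[] = {
		{ NULL, demo_task_a },
		{ NULL, demo_task_b },
		{ NULL, NULL },
	};
	struct demo_hwmgr hw = { 0 };
	return demo_dispatch(&hw, list, NULL, NULL);
}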
int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
polaris10_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
/**
* Initializes the thermal controller related functions in the Hardware Manager structure.
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr,
&polaris10_thermal_set_temperature_range_master,
&(hwmgr->set_temperature_range));
if (!result) {
result = phm_construct_table(hwmgr,
&polaris10_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
}
if (!result)
hwmgr->fan_ctrl_is_in_default_mode = true;
return result;
}
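The initialisation above follows a two-step acquire-with-rollback idiom: if the second runtime table cannot be constructed, the first one is destroyed again before the error is returned, so the caller never sees a half-initialised hwmgr. A generic sketch of the same idiom, with invented demo_* helpers:
#include <stdio.h>
#include <stdlib.h>
struct demo_table { int id; };
static int demo_construct(struct demo_table **t, int id)
{
	*t = malloc(sizeof(**t));
	if (*t == NULL)
		return -1;
	(*t)->id = id;
	return 0;
}
static void demo_destroy(struct demo_table **t)
{
	free(*t);
	*t = NULL;
}
/* Build two tables; if the second one fails, tear the first one down
 * again so the pair is either fully built or not built at all. */
static int demo_init_both(struct demo_table **a, struct demo_table **b)
{
	int result = demo_construct(a, 1);
	if (!result) {
		result = demo_construct(b, 2);
		if (result)
			demo_destroy(a);
	}
	return result;
}
int main(void)
{
	struct demo_table *a = NULL, *b = NULL;
	int result = demo_init_both(&a, &b);
	printf("init %s\n", result ? "failed" : "ok");
	demo_destroy(&b);
	demo_destroy(&a);
	return result;
}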

View File

@ -1,62 +0,0 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _POLARIS10_THERMAL_H_
#define _POLARIS10_THERMAL_H_
#include "hwmgr.h"
#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
#endif
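The MINIMUM/MAXIMUM temp-reading limits above correspond to the 9-bit temperature field the thermal code reads back: bit 9 flags an out-of-range reading and the low nine bits carry the value. A simplified, positive-range-only illustration of that decode (demo_* names are invented, and the real driver also scales the result by PP_TEMPERATURE_UNITS_PER_CENTIGRADES):
#include <stdint.h>
#include <stdio.h>
#define DEMO_THERMAL_MAXIMUM_TEMP_READING 255	/* mirrors the define above */
/* Bit 9 of the raw field flags a reading outside the usable range;
 * the low nine bits carry the temperature in degrees C. */
static int demo_decode_temp(uint32_t raw)
{
	if (raw & 0x200)
		return DEMO_THERMAL_MAXIMUM_TEMP_READING;
	return (int)(raw & 0x1ff);
}
int main(void)
{
	printf("raw 0x04b -> %d C\n", demo_decode_temp(0x04b));	/* 75 */
	printf("raw 0x200 -> %d C\n", demo_decode_temp(0x200));	/* clamped */
	return 0;
}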

View File

@ -21,9 +21,53 @@
* *
*/ */
#include "polaris10_clockpowergating.h" #include "smu7_hwmgr.h"
#include "smu7_clockpowergating.h"
#include "smu7_common.h"
int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
PPSMC_MSG_UVDDPM_Disable);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
PPSMC_MSG_VCEDPM_Disable);
}
static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
PPSMC_MSG_SAMUDPM_Enable :
PPSMC_MSG_SAMUDPM_Disable);
}
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
}
static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
if (!bgate)
smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
}
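The new helpers above encode one ordering rule: when a block is un-gated its SMC table is refreshed first and DPM is then re-enabled, while gating only needs DPM switched off. A toy user-space restatement of that rule, with all names invented:
#include <stdbool.h>
#include <stdio.h>
static void demo_update_smc_table(const char *block)
{
	printf("refresh SMC table for %s\n", block);
}
static void demo_enable_dpm(const char *block, bool enable)
{
	printf("%s DPM for %s\n", enable ? "enable" : "disable", block);
}
/* Mirrors smu7_update_uvd_dpm()/smu7_update_vce_dpm(): the table is
 * only refreshed when the block is being brought back up. */
static void demo_update_block_dpm(const char *block, bool bgate)
{
	if (!bgate)
		demo_update_smc_table(block);
	demo_enable_dpm(block, !bgate);
}
int main(void)
{
	demo_update_block_dpm("UVD", true);	/* gating: just disable DPM */
	demo_update_block_dpm("UVD", false);	/* un-gating: refresh, then enable */
	return 0;
}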
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{ {
if (phm_cf_want_uvd_power_gating(hwmgr)) if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr, return smum_send_msg_to_smc(hwmgr->smumgr,
@ -31,7 +75,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr) int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{ {
if (phm_cf_want_uvd_power_gating(hwmgr)) { if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@ -47,7 +91,7 @@ static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr) int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{ {
if (phm_cf_want_vce_power_gating(hwmgr)) if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr, return smum_send_msg_to_smc(hwmgr->smumgr,
@ -55,7 +99,7 @@ static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr) int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{ {
if (phm_cf_want_vce_power_gating(hwmgr)) if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr, return smum_send_msg_to_smc(hwmgr->smumgr,
@ -63,7 +107,7 @@ static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr) int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{ {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating)) PHM_PlatformCaps_SamuPowerGating))
@ -72,7 +116,7 @@ static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr) int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{ {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating)) PHM_PlatformCaps_SamuPowerGating))
@ -81,27 +125,24 @@ static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{ {
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false; data->uvd_power_gated = false;
data->vce_power_gated = false; data->vce_power_gated = false;
data->samu_power_gated = false; data->samu_power_gated = false;
polaris10_phm_powerup_uvd(hwmgr); smu7_powerup_uvd(hwmgr);
polaris10_phm_powerup_vce(hwmgr); smu7_powerup_vce(hwmgr);
polaris10_phm_powerup_samu(hwmgr); smu7_powerup_samu(hwmgr);
return 0; return 0;
} }
int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{ {
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->uvd_power_gated == bgate)
return 0;
data->uvd_power_gated = bgate; data->uvd_power_gated = bgate;
@ -109,11 +150,11 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device, cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE); AMD_CG_STATE_GATE);
polaris10_update_uvd_dpm(hwmgr, true); smu7_update_uvd_dpm(hwmgr, true);
polaris10_phm_powerdown_uvd(hwmgr); smu7_powerdown_uvd(hwmgr);
} else { } else {
polaris10_phm_powerup_uvd(hwmgr); smu7_powerup_uvd(hwmgr);
polaris10_update_uvd_dpm(hwmgr, false); smu7_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device, cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE); AMD_CG_STATE_UNGATE);
@ -122,9 +163,9 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
return 0; return 0;
} }
int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{ {
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->vce_power_gated == bgate) if (data->vce_power_gated == bgate)
return 0; return 0;
@ -135,11 +176,11 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device, cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE); AMD_CG_STATE_GATE);
polaris10_update_vce_dpm(hwmgr, true); smu7_update_vce_dpm(hwmgr, true);
polaris10_phm_powerdown_vce(hwmgr); smu7_powerdown_vce(hwmgr);
} else { } else {
polaris10_phm_powerup_vce(hwmgr); smu7_powerup_vce(hwmgr);
polaris10_update_vce_dpm(hwmgr, false); smu7_update_vce_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device, cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE); AMD_CG_STATE_UNGATE);
@ -147,9 +188,9 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return 0; return 0;
} }
int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
{ {
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->samu_power_gated == bgate) if (data->samu_power_gated == bgate)
return 0; return 0;
@ -157,22 +198,25 @@ int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
data->samu_power_gated = bgate; data->samu_power_gated = bgate;
if (bgate) { if (bgate) {
polaris10_update_samu_dpm(hwmgr, true); smu7_update_samu_dpm(hwmgr, true);
polaris10_phm_powerdown_samu(hwmgr); smu7_powerdown_samu(hwmgr);
} else { } else {
polaris10_phm_powerup_samu(hwmgr); smu7_powerup_samu(hwmgr);
polaris10_update_samu_dpm(hwmgr, false); smu7_update_samu_dpm(hwmgr, false);
} }
return 0; return 0;
} }
int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id) const uint32_t *msg_id)
{ {
PPSMC_Msg msg; PPSMC_Msg msg;
uint32_t value; uint32_t value;
if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
return 0;
switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) { switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
case PP_GROUP_GFX: case PP_GROUP_GFX:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) { switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
@ -185,7 +229,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
@ -195,7 +239,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -208,7 +252,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
@ -219,7 +263,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -232,7 +276,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -245,7 +289,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -259,12 +303,12 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
default: default:
return -1; return -EINVAL;
} }
break; break;
@ -279,7 +323,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@ -289,7 +333,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -302,7 +346,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
@ -313,7 +357,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -326,7 +370,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ? msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@ -336,7 +380,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -349,7 +393,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
@ -360,7 +404,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -373,7 +417,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
if (PP_STATE_SUPPORT_LS & *msg_id) { if (PP_STATE_SUPPORT_LS & *msg_id) {
@ -384,7 +428,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
@ -397,18 +441,18 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter( if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value)) hwmgr->smumgr, msg, value))
return -1; return -EINVAL;
} }
break; break;
default: default:
return -1; return -EINVAL;
} }
break; break;
default: default:
return -1; return -EINVAL;
} }
@ -419,7 +463,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating. * Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx. * Dynamic per CU Power Gating will be done in gfx.
*/ */
int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{ {
struct cgs_system_info sys_info = {0}; struct cgs_system_info sys_info = {0};
uint32_t active_cus; uint32_t active_cus;
@ -432,8 +476,8 @@ int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable
if (result) if (result)
return -EINVAL; return -EINVAL;
else
active_cus = sys_info.value; active_cus = sys_info.value;
if (enable) if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2015 Advanced Micro Devices, Inc. * Copyright 2016 Advanced Micro Devices, Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@ -21,15 +21,20 @@
* *
*/ */
#ifndef _FIJI_CLOCK_POWER_GATING_H_ #ifndef _SMU7_CLOCK_POWER_GATING_H_
#define _FIJI_CLOCK_POWER_GATING_H_ #define _SMU7_CLOCK__POWER_GATING_H_
#include "fiji_hwmgr.h" #include "smu7_hwmgr.h"
#include "pp_asicblocks.h" #include "pp_asicblocks.h"
extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
#endif

View File

@ -0,0 +1,55 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _SMU7_DYN_DEFAULTS_H
#define _SMU7_DYN_DEFAULTS_H
/* We need to fill in the default values */
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200
#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0
#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8
#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
#define SMU7_REFERENCEDIVIDER_DFLT 4
#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687
#define SMU7_CGULVPARAMETER_DFLT 0x00040035
#define SMU7_CGULVCONTROL_DFLT 0x00007450
#define SMU7_TARGETACTIVITY_DFLT 50
#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
#endif

File diff suppressed because it is too large

View File

@ -21,82 +21,100 @@
* *
*/ */
#ifndef POLARIS10_HWMGR_H #ifndef _SMU7_HWMGR_H
#define POLARIS10_HWMGR_H #define _SMU7_HWMGR_H
#include "hwmgr.h" #include "hwmgr.h"
#include "smu74.h"
#include "smu74_discrete.h"
#include "ppatomctrl.h" #include "ppatomctrl.h"
#include "polaris10_ppsmc.h"
#include "polaris10_powertune.h"
#include "polaris10_smumgr.h"
#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2 #define SMU7_MAX_HARDWARE_POWERLEVELS 2
#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0 #define SMU7_VOLTAGE_CONTROL_NONE 0x0
#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1 #define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1
#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2 #define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3 #define SMU7_VOLTAGE_CONTROL_MERGED 0x3
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 #define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 #define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004 #define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008 #define DPMTABLE_UPDATE_MCLK 0x00000008
struct polaris10_performance_level { enum gpu_pt_config_reg_type {
GPU_CONFIGREG_MMR = 0,
GPU_CONFIGREG_SMC_IND,
GPU_CONFIGREG_DIDT_IND,
GPU_CONFIGREG_GC_CAC_IND,
GPU_CONFIGREG_CACHE,
GPU_CONFIGREG_MAX
};
struct gpu_pt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
enum gpu_pt_config_reg_type type;
};
struct smu7_performance_level {
uint32_t memory_clock; uint32_t memory_clock;
uint32_t engine_clock; uint32_t engine_clock;
uint16_t pcie_gen; uint16_t pcie_gen;
uint16_t pcie_lane; uint16_t pcie_lane;
}; };
struct polaris10_uvd_clocks { struct smu7_thermal_temperature_setting {
long temperature_low;
long temperature_high;
long temperature_shutdown;
};
struct smu7_uvd_clocks {
uint32_t vclk; uint32_t vclk;
uint32_t dclk; uint32_t dclk;
}; };
struct polaris10_vce_clocks { struct smu7_vce_clocks {
uint32_t evclk; uint32_t evclk;
uint32_t ecclk; uint32_t ecclk;
}; };
struct polaris10_power_state { struct smu7_power_state {
uint32_t magic; uint32_t magic;
struct polaris10_uvd_clocks uvd_clks; struct smu7_uvd_clocks uvd_clks;
struct polaris10_vce_clocks vce_clks; struct smu7_vce_clocks vce_clks;
uint32_t sam_clk; uint32_t sam_clk;
uint16_t performance_level_count; uint16_t performance_level_count;
bool dc_compatible; bool dc_compatible;
uint32_t sclk_threshold; uint32_t sclk_threshold;
struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS]; struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
}; };
struct polaris10_dpm_level { struct smu7_dpm_level {
bool enabled; bool enabled;
uint32_t value; uint32_t value;
uint32_t param1; uint32_t param1;
}; };
#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8 #define MAX_REGULAR_DPM_NUMBER 8
#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500 #define SMU7_MINIMUM_ENGINE_CLOCK 2500
struct polaris10_single_dpm_table { struct smu7_single_dpm_table {
uint32_t count; uint32_t count;
struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
}; };
struct polaris10_dpm_table { struct smu7_dpm_table {
struct polaris10_single_dpm_table sclk_table; struct smu7_single_dpm_table sclk_table;
struct polaris10_single_dpm_table mclk_table; struct smu7_single_dpm_table mclk_table;
struct polaris10_single_dpm_table pcie_speed_table; struct smu7_single_dpm_table pcie_speed_table;
struct polaris10_single_dpm_table vddc_table; struct smu7_single_dpm_table vddc_table;
struct polaris10_single_dpm_table vddci_table; struct smu7_single_dpm_table vddci_table;
struct polaris10_single_dpm_table mvdd_table; struct smu7_single_dpm_table mvdd_table;
}; };
struct polaris10_clock_registers { struct smu7_clock_registers {
uint32_t vCG_SPLL_FUNC_CNTL; uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2; uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3; uint32_t vCG_SPLL_FUNC_CNTL_3;
@ -117,42 +135,35 @@ struct polaris10_clock_registers {
#define DISABLE_MC_LOADMICROCODE 1 #define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2 #define DISABLE_MC_CFGPROGRAMMING 2
struct polaris10_voltage_smio_registers { struct smu7_voltage_smio_registers {
uint32_t vS0_VID_LOWER_SMIO_CNTL; uint32_t vS0_VID_LOWER_SMIO_CNTL;
}; };
#define POLARIS10_MAX_LEAKAGE_COUNT 8 #define SMU7_MAX_LEAKAGE_COUNT 8
struct polaris10_leakage_voltage { struct smu7_leakage_voltage {
uint16_t count; uint16_t count;
uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT]; uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT];
uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT]; uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
}; };
struct polaris10_vbios_boot_state { struct smu7_vbios_boot_state {
uint16_t mvdd_bootup_value; uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value; uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value; uint16_t vddci_bootup_value;
uint16_t vddgfx_bootup_value;
uint32_t sclk_bootup_value; uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value; uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value; uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value; uint16_t pcie_lane_bootup_value;
}; };
/* Ultra Low Voltage parameter structure */ struct smu7_display_timing {
struct polaris10_ulv_parm {
bool ulv_supported;
uint32_t cg_ulv_parameter;
uint32_t ulv_volt_change_delay;
struct polaris10_performance_level ulv_power_level;
};
struct polaris10_display_timing {
uint32_t min_clock_in_sr; uint32_t min_clock_in_sr;
uint32_t num_existing_displays; uint32_t num_existing_displays;
}; };
struct polaris10_dpmlevel_enable_mask { struct smu7_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask; uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask; uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask; uint32_t acp_dpm_enable_mask;
@ -162,18 +173,15 @@ struct polaris10_dpmlevel_enable_mask {
uint32_t pcie_dpm_enable_mask; uint32_t pcie_dpm_enable_mask;
}; };
struct polaris10_pcie_perf_range { struct smu7_pcie_perf_range {
uint16_t max; uint16_t max;
uint16_t min; uint16_t min;
}; };
struct polaris10_hwmgr { struct smu7_hwmgr {
struct polaris10_dpm_table dpm_table; struct smu7_dpm_table dpm_table;
struct polaris10_dpm_table golden_dpm_table; struct smu7_dpm_table golden_dpm_table;
SMU74_Discrete_DpmTable smc_state_table;
struct SMU74_Discrete_Ulv ulv_setting;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
uint32_t voting_rights_clients0; uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1; uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2; uint32_t voting_rights_clients2;
@ -185,12 +193,11 @@ struct polaris10_hwmgr {
uint32_t static_screen_threshold_unit; uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold; uint32_t static_screen_threshold;
uint32_t voltage_control; uint32_t voltage_control;
uint32_t vddc_vddci_delta; uint32_t vdd_gfx_control;
uint32_t vddc_vddgfx_delta;
uint32_t active_auto_throttle_sources; uint32_t active_auto_throttle_sources;
struct polaris10_clock_registers clock_registers; struct smu7_clock_registers clock_registers;
struct polaris10_voltage_smio_registers voltage_smio_registers;
bool is_memory_gddr5; bool is_memory_gddr5;
uint16_t acpi_vddc; uint16_t acpi_vddc;
@ -200,8 +207,9 @@ struct polaris10_hwmgr {
uint32_t pcie_gen_cap; uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap; uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap; uint32_t pcie_spc_cap;
struct polaris10_leakage_voltage vddc_leakage; struct smu7_leakage_voltage vddc_leakage;
struct polaris10_leakage_voltage Vddci_leakage; struct smu7_leakage_voltage vddci_leakage;
struct smu7_leakage_voltage vddcgfx_leakage;
uint32_t mvdd_control; uint32_t mvdd_control;
uint32_t vddc_mask_low; uint32_t vddc_mask_low;
@ -210,30 +218,23 @@ struct polaris10_hwmgr {
uint16_t min_vddc_in_pptable; uint16_t min_vddc_in_pptable;
uint16_t max_vddci_in_pptable; uint16_t max_vddci_in_pptable;
uint16_t min_vddci_in_pptable; uint16_t min_vddci_in_pptable;
uint32_t mclk_strobe_mode_threshold;
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edcwr_enable_threshold;
bool is_uvd_enabled; bool is_uvd_enabled;
struct polaris10_vbios_boot_state vbios_boot_state; struct smu7_vbios_boot_state vbios_boot_state;
bool pcie_performance_request; bool pcie_performance_request;
bool battery_state; bool battery_state;
bool is_tlu_enabled; bool is_tlu_enabled;
bool disable_handshake;
bool smc_voltage_control_enabled;
bool vbi_time_out_support;
/* ---- SMC SRAM Address of firmware header tables ---- */ uint32_t soft_regs_start;
uint32_t sram_end;
uint32_t dpm_table_start;
uint32_t soft_regs_start;
uint32_t mc_reg_table_start;
uint32_t fan_table_start;
uint32_t arb_table_start;
/* ---- Stuff originally coming from Evergreen ---- */ /* ---- Stuff originally coming from Evergreen ---- */
uint32_t vddci_control; uint32_t vddci_control;
struct pp_atomctrl_voltage_table vddc_voltage_table; struct pp_atomctrl_voltage_table vddc_voltage_table;
struct pp_atomctrl_voltage_table vddci_voltage_table; struct pp_atomctrl_voltage_table vddci_voltage_table;
struct pp_atomctrl_voltage_table mvdd_voltage_table; struct pp_atomctrl_voltage_table mvdd_voltage_table;
struct pp_atomctrl_voltage_table vddgfx_voltage_table;
uint32_t mgcg_cgtt_local2; uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3; uint32_t mgcg_cgtt_local3;
@ -247,7 +248,7 @@ struct polaris10_hwmgr {
bool performance_request_registered; bool performance_request_registered;
/* ---- Low Power Features ---- */ /* ---- Low Power Features ---- */
struct polaris10_ulv_parm ulv; bool ulv_supported;
/* ---- CAC Stuff ---- */ /* ---- CAC Stuff ---- */
uint32_t cac_table_start; uint32_t cac_table_start;
@ -261,8 +262,8 @@ struct polaris10_hwmgr {
bool enable_tdc_limit_feature; bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature; bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature; bool disable_uvd_power_tune_feature;
const struct polaris10_pt_defaults *power_tune_defaults;
struct SMU74_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset; uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold; uint32_t fast_watermark_threshold;
@ -270,23 +271,22 @@ struct polaris10_hwmgr {
bool vddc_phase_shed_control; bool vddc_phase_shed_control;
/* ---- DI/DT ---- */ /* ---- DI/DT ---- */
struct polaris10_display_timing display_timing; struct smu7_display_timing display_timing;
uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
/* ---- Thermal Temperature Setting ---- */ /* ---- Thermal Temperature Setting ---- */
struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask; struct smu7_thermal_temperature_setting thermal_temp_setting;
struct smu7_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table; uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled; uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled; uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled; uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks; uint32_t min_engine_clocks;
struct polaris10_pcie_perf_range pcie_gen_performance; struct smu7_pcie_perf_range pcie_gen_performance;
struct polaris10_pcie_perf_range pcie_lane_performance; struct smu7_pcie_perf_range pcie_lane_performance;
struct polaris10_pcie_perf_range pcie_gen_power_saving; struct smu7_pcie_perf_range pcie_gen_power_saving;
struct polaris10_pcie_perf_range pcie_lane_power_saving; struct smu7_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels; bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels; bool use_pcie_power_saving_levels;
uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target; uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target; uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold; uint32_t low_sclk_interrupt_threshold;
@ -306,49 +306,48 @@ struct polaris10_hwmgr {
uint32_t up_hyst; uint32_t up_hyst;
uint32_t disable_dpm_mask; uint32_t disable_dpm_mask;
bool apply_optimized_settings; bool apply_optimized_settings;
uint32_t avfs_vdroop_override_setting; uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage; bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2; uint32_t frame_time_x2;
uint16_t mem_latency_high;
uint16_t mem_latency_low;
}; };
/* To convert to Q8.8 format for firmware */ /* To convert to Q8.8 format for firmware */
#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256 #define SMU7_Q88_FORMAT_CONVERSION_UNIT 256
enum Polaris10_I2CLineID { enum SMU7_I2CLineID {
Polaris10_I2CLineID_DDC1 = 0x90, SMU7_I2CLineID_DDC1 = 0x90,
Polaris10_I2CLineID_DDC2 = 0x91, SMU7_I2CLineID_DDC2 = 0x91,
Polaris10_I2CLineID_DDC3 = 0x92, SMU7_I2CLineID_DDC3 = 0x92,
Polaris10_I2CLineID_DDC4 = 0x93, SMU7_I2CLineID_DDC4 = 0x93,
Polaris10_I2CLineID_DDC5 = 0x94, SMU7_I2CLineID_DDC5 = 0x94,
Polaris10_I2CLineID_DDC6 = 0x95, SMU7_I2CLineID_DDC6 = 0x95,
Polaris10_I2CLineID_SCLSDA = 0x96, SMU7_I2CLineID_SCLSDA = 0x96,
Polaris10_I2CLineID_DDCVGA = 0x97 SMU7_I2CLineID_DDCVGA = 0x97
}; };
#define POLARIS10_I2C_DDC1DATA 0 #define SMU7_I2C_DDC1DATA 0
#define POLARIS10_I2C_DDC1CLK 1 #define SMU7_I2C_DDC1CLK 1
#define POLARIS10_I2C_DDC2DATA 2 #define SMU7_I2C_DDC2DATA 2
#define POLARIS10_I2C_DDC2CLK 3 #define SMU7_I2C_DDC2CLK 3
#define POLARIS10_I2C_DDC3DATA 4 #define SMU7_I2C_DDC3DATA 4
#define POLARIS10_I2C_DDC3CLK 5 #define SMU7_I2C_DDC3CLK 5
#define POLARIS10_I2C_SDA 40 #define SMU7_I2C_SDA 40
#define POLARIS10_I2C_SCL 41 #define SMU7_I2C_SCL 41
#define POLARIS10_I2C_DDC4DATA 65 #define SMU7_I2C_DDC4DATA 65
#define POLARIS10_I2C_DDC4CLK 66 #define SMU7_I2C_DDC4CLK 66
#define POLARIS10_I2C_DDC5DATA 0x48 #define SMU7_I2C_DDC5DATA 0x48
#define POLARIS10_I2C_DDC5CLK 0x49 #define SMU7_I2C_DDC5CLK 0x49
#define POLARIS10_I2C_DDC6DATA 0x4a #define SMU7_I2C_DDC6DATA 0x4a
#define POLARIS10_I2C_DDC6CLK 0x4b #define SMU7_I2C_DDC6CLK 0x4b
#define POLARIS10_I2C_DDCVGADATA 0x4c #define SMU7_I2C_DDCVGADATA 0x4c
#define POLARIS10_I2C_DDCVGACLK 0x4d #define SMU7_I2C_DDCVGACLK 0x4d
#define POLARIS10_UNUSED_GPIO_PIN 0x7F #define SMU7_UNUSED_GPIO_PIN 0x7F
uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
uint32_t clock_insr);
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
#endif #endif
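The Q8.8 conversion unit defined above is plain fixed-point scaling: multiplying by 256 puts a value into 8.8 format before it is handed to the firmware. A small illustration (the 50% activity figure is made up, and no claim is made about which fields the driver actually scales this way):
#include <stdint.h>
#include <stdio.h>
#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256
int main(void)
{
	/* e.g. a 50% activity target expressed for the firmware */
	uint32_t percent = 50;
	uint16_t q88 = (uint16_t)(percent * SMU7_Q88_FORMAT_CONVERSION_UNIT);
	/* 50 * 256 = 12800 == 0x3200: integer part in the high byte,
	 * fractional part (zero here) in the low byte. */
	printf("%u%% -> 0x%04x in Q8.8\n", (unsigned)percent, (unsigned)q88);
	return 0;
}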

View File

@ -20,17 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#ifndef POLARIS10_POWERTUNE_H #ifndef _SMU7_POWERTUNE_H
#define POLARIS10_POWERTUNE_H #define _SMU7_POWERTUNE_H
enum polaris10_pt_config_reg_type {
POLARIS10_CONFIGREG_MMR = 0,
POLARIS10_CONFIGREG_SMC_IND,
POLARIS10_CONFIGREG_DIDT_IND,
POLARIS10_CONFIGREG_GC_CAC_IND,
POLARIS10_CONFIGREG_CACHE,
POLARIS10_CONFIGREG_MAX
};
#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 #define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 #define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
@ -52,30 +43,20 @@ enum polaris10_pt_config_reg_type {
#define ixGC_CAC_CNTL 0x0000 #define ixGC_CAC_CNTL 0x0000
#define ixDIDT_SQ_STALL_CTRL 0x0004 #define ixDIDT_SQ_STALL_CTRL 0x0004
#define ixDIDT_SQ_TUNING_CTRL 0x0005 #define ixDIDT_SQ_TUNING_CTRL 0x0005
#define ixDIDT_TD_STALL_CTRL 0x0044 #define ixDIDT_TD_STALL_CTRL 0x0044
#define ixDIDT_TD_TUNING_CTRL 0x0045 #define ixDIDT_TD_TUNING_CTRL 0x0045
#define ixDIDT_TCP_STALL_CTRL 0x0064 #define ixDIDT_TCP_STALL_CTRL 0x0064
#define ixDIDT_TCP_TUNING_CTRL 0x0065 #define ixDIDT_TCP_TUNING_CTRL 0x0065
struct polaris10_pt_config_reg {
uint32_t offset;
uint32_t mask;
uint32_t shift;
uint32_t value;
enum polaris10_pt_config_reg_type type;
};
int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); #endif /* DGPU_POWERTUNE_H */
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
#endif /* POLARIS10_POWERTUNE_H */

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2015 Advanced Micro Devices, Inc. * Copyright 2016 Advanced Micro Devices, Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@ -20,18 +20,15 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#include <asm/div64.h>
#include "fiji_thermal.h"
#include "fiji_hwmgr.h"
#include "fiji_smumgr.h"
#include "fiji_ppsmc.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, #include <asm/div64.h>
#include "smu7_thermal.h"
#include "smu7_hwmgr.h"
#include "smu7_common.h"
int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info) struct phm_fan_speed_info *fan_speed_info)
{ {
if (hwmgr->thermal_controller.fanInfo.bNoFan) if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0; return 0;
@ -55,7 +52,7 @@ int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0; return 0;
} }
int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t *speed) uint32_t *speed)
{ {
uint32_t duty100; uint32_t duty100;
@ -84,7 +81,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
return 0; return 0;
} }
int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{ {
uint32_t tach_period; uint32_t tach_period;
uint32_t crystal_clock_freq; uint32_t crystal_clock_freq;
@ -100,9 +97,9 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (tach_period == 0) if (tach_period == 0)
return -EINVAL; return -EINVAL;
crystal_clock_freq = tonga_get_xclk(hwmgr); crystal_clock_freq = smu7_get_xclk(hwmgr);
*speed = 60 * crystal_clock_freq * 10000/ tach_period; *speed = 60 * crystal_clock_freq * 10000 / tach_period;
return 0; return 0;
} }
@ -113,7 +110,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
* mode the fan control mode, 0 default, 1 by percent, 5 by RPM * mode the fan control mode, 0 default, 1 by percent, 5 by RPM
* @exception Should always succeed. * @exception Should always succeed.
*/ */
int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{ {
if (hwmgr->fan_ctrl_is_in_default_mode) { if (hwmgr->fan_ctrl_is_in_default_mode) {
@ -139,7 +136,7 @@ int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
* @param hwmgr the address of the powerplay hardware manager. * @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed. * @exception Should always succeed.
*/ */
int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{ {
if (!hwmgr->fan_ctrl_is_in_default_mode) { if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@ -152,7 +149,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0; return 0;
} }
static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{ {
int result; int result;
@ -187,7 +184,7 @@ static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
} }
int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{ {
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl); return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
} }
@ -198,7 +195,7 @@ int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (0% - 100%) to be set. * @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0. * @exception Fails if the 100% setting appears to be 0.
*/ */
int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed) uint32_t speed)
{ {
uint32_t duty100; uint32_t duty100;
@ -213,7 +210,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) PHM_PlatformCaps_MicrocodeFanControl))
fiji_fan_ctrl_stop_smc_fan_control(hwmgr); smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100); CG_FDO_CTRL1, FMAX_DUTY100);
@ -228,7 +225,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
} }
/** /**
@ -236,7 +233,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the powerplay hardware manager. * @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds. * @exception Always succeeds.
*/ */
int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{ {
int result; int result;
@ -245,11 +242,11 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) { PHM_PlatformCaps_MicrocodeFanControl)) {
result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result) if (!result)
result = fiji_fan_ctrl_start_smc_fan_control(hwmgr); result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
} else } else
result = fiji_fan_ctrl_set_default_mode(hwmgr); result = smu7_fan_ctrl_set_default_mode(hwmgr);
return result; return result;
} }
@ -260,7 +257,7 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (min - max) to be set. * @param speed is the percentage value (min - max) to be set.
* @exception Fails if the speed does not lie between min and max. * @exception Fails if the speed does not lie between min and max.
*/ */
int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{ {
uint32_t tach_period; uint32_t tach_period;
uint32_t crystal_clock_freq; uint32_t crystal_clock_freq;
@ -272,14 +269,18 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0; return 0;
crystal_clock_freq = tonga_get_xclk(hwmgr); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = smu7_get_xclk(hwmgr);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period); CG_TACH_STATUS, TACH_PERIOD, tach_period);
return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
} }
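The RPM path above derives the tachometer period from the reference clock; the snippet below just replays that expression with invented numbers (no units are claimed for the clock value, and 64-bit intermediates are used only to keep the toy example overflow-safe):
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	/* Invented values purely for illustration. */
	uint64_t crystal_clock_freq = 10000;
	uint64_t rpm = 3000;
	/* Same expression as smu7_fan_ctrl_set_fan_speed_rpm() above;
	 * the read-back path divides the same product by TACH_PERIOD. */
	uint64_t tach_period = 60 * crystal_clock_freq * 10000 / (8 * rpm);
	printf("requested %llu RPM -> TACH_PERIOD %llu\n",
	       (unsigned long long)rpm, (unsigned long long)tach_period);
	return 0;
}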
/** /**
@ -287,7 +288,7 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
* *
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
*/ */
int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr) int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{ {
int temp; int temp;
@ -296,7 +297,7 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
/* Bit 9 means the reading is lower than the lowest usable value. */ /* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200) if (temp & 0x200)
temp = FIJI_THERMAL_MAXIMUM_TEMP_READING; temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
else else
temp = temp & 0x1ff; temp = temp & 0x1ff;
@ -312,12 +313,12 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
* @param range Temperature range to be programmed for high and low alert signals * @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid. * @exception PP_Result_BadInput if the input data is not valid.
*/ */
static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
uint32_t low_temp, uint32_t high_temp) uint32_t low_temp, uint32_t high_temp)
{ {
uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP * uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES; PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP * uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES; PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp) if (low < low_temp)
@ -346,7 +347,7 @@ static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
* *
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
*/ */
static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr) static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{ {
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@ -365,13 +366,13 @@ static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
* *
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
*/ */
static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr) int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{ {
uint32_t alert; uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK); CG_THERMAL_INT, THERM_INT_MASK);
alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert); CG_THERMAL_INT, THERM_INT_MASK, alert);
@ -383,13 +384,13 @@ static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
* Disable thermal alerts on the RV770 thermal controller. * Disable thermal alerts on the RV770 thermal controller.
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
*/ */
static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr) int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{ {
uint32_t alert; uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK); CG_THERMAL_INT, THERM_INT_MASK);
alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK); alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert); CG_THERMAL_INT, THERM_INT_MASK, alert);
@ -402,128 +403,16 @@ static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
* Currently just disables alerts. * Currently just disables alerts.
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
*/ */
int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{ {
int result = fiji_thermal_disable_alert(hwmgr); int result = smu7_thermal_disable_alert(hwmgr);
if (hwmgr->thermal_controller.fanInfo.bNoFan) if (!hwmgr->thermal_controller.fanInfo.bNoFan)
fiji_fan_ctrl_set_default_mode(hwmgr); smu7_fan_ctrl_set_default_mode(hwmgr);
return result; return result;
} }
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
uint32_t reference_clock;
int res;
uint64_t tmp64;
if (data->fan_table_start == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
usPWMMin * duty100;
do_div(tmp64, 10000);
fdo_min = (uint16_t)tmp64;
t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.TempMin = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMin) / 100);
fan_table.TempMed = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMed) / 100);
fan_table.TempMax = cpu_to_be16((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMax) / 100);
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
fan_table.FdoMin = cpu_to_be16(fdo_min);
fan_table.HystDown = cpu_to_be16(hwmgr->
thermal_controller.advanceFanControlParameters.ucTHyst);
fan_table.HystUp = cpu_to_be16(1);
fan_table.HystSlope = cpu_to_be16(1);
fan_table.TempRespLim = cpu_to_be16(5);
reference_clock = tonga_get_xclk(hwmgr);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
reference_clock) / 1600);
fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_CTRL, TEMP_SEL);
res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
data->sram_end);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
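
Editor's note: the removed tf_fiji_thermal_setup_fan_table() (replaced in the master table below by smum_thermal_setup_fan_table) packs everything as fixed-point values. Here is a standalone sketch of two of its conversions: the minimum duty cycle scaled against FMAX_DUTY100, and the PWM-vs-temperature slope with the usual +50/100 round-to-nearest. All parameter values below are invented; temperatures are assumed to be in hundredths of a degree, as the /100 conversions above suggest.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical fan-table inputs (pptable-style units). */
	unsigned int duty100 = 255;	/* FMAX_DUTY100 read back from the FDO block */
	unsigned int pwm_min = 2000;	/* usPWMMin, assumed 0.01 % units */
	unsigned int pwm_med = 4000;	/* usPWMMed, assumed 0.01 % units */
	unsigned int t_min   = 4500;	/* usTMin,  assumed 0.01 degC units */
	unsigned int t_med   = 6500;	/* usTMed,  assumed 0.01 degC units */

	/* Minimum duty cycle: usPWMMin * duty100 / 10000, as in the removed code. */
	uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);

	/* Slope between (TMin, PWMMin) and (TMed, PWMMed), with +50/100 acting
	 * as round-to-nearest on the final division. */
	unsigned int t_diff1   = t_med - t_min;
	unsigned int pwm_diff1 = pwm_med - pwm_min;
	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);

	/* TempMin as stored in the SMC table: hundredths rounded to whole degrees. */
	uint16_t temp_min = (uint16_t)((50 + t_min) / 100);

	printf("fdo_min=%u slope1=%u temp_min=%u\n",
	       (unsigned int)fdo_min, (unsigned int)slope1, (unsigned int)temp_min);
	return 0;
}
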
/** /**
* Start the fan control on the SMC. * Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager. * @param hwmgr the address of the powerplay hardware manager.
@ -533,7 +422,7 @@ static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code * @param Result the last failure code
* @return result from set temperature range routine * @return result from set temperature range routine
*/ */
static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result) void *input, void *output, void *storage, int result)
{ {
/* If the fantable setup has failed we could have disabled /* If the fantable setup has failed we could have disabled
@ -543,8 +432,8 @@ static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
*/ */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) { PHM_PlatformCaps_MicrocodeFanControl)) {
fiji_fan_ctrl_start_smc_fan_control(hwmgr); smu7_fan_ctrl_start_smc_fan_control(hwmgr);
fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
} }
return 0; return 0;
@ -559,7 +448,7 @@ static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
* @param Result the last failure code * @param Result the last failure code
* @return result from set temperature range routine * @return result from set temperature range routine
*/ */
int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result) void *input, void *output, void *storage, int result)
{ {
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
@ -567,7 +456,7 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (range == NULL) if (range == NULL)
return -EINVAL; return -EINVAL;
return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max); return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
} }
/** /**
@ -579,10 +468,10 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
* @param Result the last failure code * @param Result the last failure code
* @return result from initialize thermal controller routine * @return result from initialize thermal controller routine
*/ */
int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result) void *input, void *output, void *storage, int result)
{ {
return fiji_thermal_initialize(hwmgr); return smu7_thermal_initialize(hwmgr);
} }
/** /**
@ -594,10 +483,10 @@ int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
* @param Result the last failure code * @param Result the last failure code
* @return result from enable alert routine * @return result from enable alert routine
*/ */
int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result) void *input, void *output, void *storage, int result)
{ {
return fiji_thermal_enable_alert(hwmgr); return smu7_thermal_enable_alert(hwmgr);
} }
/** /**
@ -609,53 +498,54 @@ int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
* @param Result the last failure code * @param Result the last failure code
* @return result from disable alert routine * @return result from disable alert routine
*/ */
static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr, static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result) void *input, void *output, void *storage, int result)
{ {
return fiji_thermal_disable_alert(hwmgr); return smu7_thermal_disable_alert(hwmgr);
} }
static const struct phm_master_table_item static const struct phm_master_table_item
fiji_thermal_start_thermal_controller_master_list[] = { phm_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_fiji_thermal_initialize}, {NULL, tf_smu7_thermal_initialize},
{NULL, tf_fiji_thermal_set_temperature_range}, {NULL, tf_smu7_thermal_set_temperature_range},
{NULL, tf_fiji_thermal_enable_alert}, {NULL, tf_smu7_thermal_enable_alert},
{NULL, smum_thermal_avfs_enable},
/* We should restrict performance levels to low before we halt the SMC. /* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this * On the other hand we are still in boot state when we do this
* so it would be pointless. * so it would be pointless.
* If this assumption changes we have to revisit this table. * If this assumption changes we have to revisit this table.
*/ */
{NULL, tf_fiji_thermal_setup_fan_table}, {NULL, smum_thermal_setup_fan_table},
{NULL, tf_fiji_thermal_start_smc_fan_control}, {NULL, tf_smu7_thermal_start_smc_fan_control},
{NULL, NULL} {NULL, NULL}
}; };
static const struct phm_master_table_header static const struct phm_master_table_header
fiji_thermal_start_thermal_controller_master = { phm_thermal_start_thermal_controller_master = {
0, 0,
PHM_MasterTableFlag_None, PHM_MasterTableFlag_None,
fiji_thermal_start_thermal_controller_master_list phm_thermal_start_thermal_controller_master_list
}; };
static const struct phm_master_table_item static const struct phm_master_table_item
fiji_thermal_set_temperature_range_master_list[] = { phm_thermal_set_temperature_range_master_list[] = {
{NULL, tf_fiji_thermal_disable_alert}, {NULL, tf_smu7_thermal_disable_alert},
{NULL, tf_fiji_thermal_set_temperature_range}, {NULL, tf_smu7_thermal_set_temperature_range},
{NULL, tf_fiji_thermal_enable_alert}, {NULL, tf_smu7_thermal_enable_alert},
{NULL, NULL} {NULL, NULL}
}; };
static const struct phm_master_table_header static const struct phm_master_table_header
fiji_thermal_set_temperature_range_master = { phm_thermal_set_temperature_range_master = {
0, 0,
PHM_MasterTableFlag_None, PHM_MasterTableFlag_None,
fiji_thermal_set_temperature_range_master_list phm_thermal_set_temperature_range_master_list
}; };
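
Editor's note: the two master tables above are NULL-terminated lists of table functions that phm_construct_table() turns into runtime sequences, and the in-line comment explains why the ordering matters (performance levels would ideally be restricted before halting the SMC). As a rough, self-contained illustration of the pattern only, and not the actual powerplay implementation, a minimal dispatcher could look like the sketch below; the meaning given to the first struct member (an optional predicate) is an assumption.

#include <stddef.h>
#include <stdio.h>

struct ctx;				/* stand-in for struct pp_hwmgr */
typedef int (*table_fn)(struct ctx *c);

struct table_item {
	int (*enabled)(struct ctx *c);	/* NULL taken here to mean "always run" */
	table_fn func;			/* NULL func terminates the list */
};

/* Walk a NULL-terminated item list in order, stopping on the first error. */
static int run_table(struct ctx *c, const struct table_item *items)
{
	for (; items->func; items++) {
		if (items->enabled && !items->enabled(c))
			continue;	/* skip steps whose precondition fails */
		int r = items->func(c);
		if (r)
			return r;	/* propagate the first failing step */
	}
	return 0;
}

static int step_init(struct ctx *c)  { (void)c; puts("initialize");        return 0; }
static int step_range(struct ctx *c) { (void)c; puts("set temp range");    return 0; }
static int step_alert(struct ctx *c) { (void)c; puts("enable alert");      return 0; }

int main(void)
{
	const struct table_item start_list[] = {
		{ NULL, step_init },
		{ NULL, step_range },
		{ NULL, step_alert },
		{ NULL, NULL }
	};
	return run_table(NULL, start_list);
}
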
int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{ {
if (!hwmgr->thermal_controller.fanInfo.bNoFan) if (!hwmgr->thermal_controller.fanInfo.bNoFan)
fiji_fan_ctrl_set_default_mode(hwmgr); smu7_fan_ctrl_set_default_mode(hwmgr);
return 0; return 0;
} }
@ -664,17 +554,17 @@ int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
* @param hwmgr The address of the hardware manager. * @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication. * @exception Any error code from the low-level communication.
*/ */
int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr) int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{ {
int result; int result;
result = phm_construct_table(hwmgr, result = phm_construct_table(hwmgr,
&fiji_thermal_set_temperature_range_master, &phm_thermal_set_temperature_range_master,
&(hwmgr->set_temperature_range)); &(hwmgr->set_temperature_range));
if (!result) { if (!result) {
result = phm_construct_table(hwmgr, result = phm_construct_table(hwmgr,
&fiji_thermal_start_thermal_controller_master, &phm_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller)); &(hwmgr->start_thermal_controller));
if (result) if (result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));



@ -0,0 +1,58 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _SMU7_THERMAL_H_
#define _SMU7_THERMAL_H_
#include "hwmgr.h"
#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1
#define SMU7_THERMAL_LOW_ALERT_MASK 0x2
#define SMU7_THERMAL_MINIMUM_TEMP_READING -256
#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255
#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0
#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
#endif
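
Editor's note: the high/low alert masks defined above are the bits that smu7_thermal_enable_alert()/smu7_thermal_disable_alert() clear or set in THERM_INT_MASK, as shown in the earlier hunks. A tiny standalone sketch of that bit manipulation, with the register access replaced by a plain variable and an invented initial value:

#include <stdio.h>

#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1
#define SMU7_THERMAL_LOW_ALERT_MASK  0x2

int main(void)
{
	unsigned int therm_int_mask = 0xf;	/* hypothetical THERM_INT_MASK value */

	/* enable_alert clears the two bits (presumably unmasking the interrupts)... */
	therm_int_mask &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
	printf("alerts enabled:  0x%x\n", therm_int_mask);

	/* ...and disable_alert sets them again to mask the interrupts. */
	therm_int_mask |= SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK;
	printf("alerts disabled: 0x%x\n", therm_int_mask);
	return 0;
}
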


@ -1,350 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "tonga_clockpowergating.h"
#include "tonga_ppsmc.h"
#include "tonga_hwmgr.h"
int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
return 0;
}
int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
{
int ret = 0;
switch (block) {
case PHM_AsicBlock_UVD_MVC:
case PHM_AsicBlock_UVD:
case PHM_AsicBlock_UVD_HD:
case PHM_AsicBlock_UVD_SD:
if (gating == PHM_ClockGateSetting_StaticOff)
ret = tonga_phm_powerdown_uvd(hwmgr);
else
ret = tonga_phm_powerup_uvd(hwmgr);
break;
case PHM_AsicBlock_GFX:
default:
break;
}
return ret;
}
int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
tonga_phm_powerup_uvd(hwmgr);
tonga_phm_powerup_vce(hwmgr);
return 0;
}
int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
if (data->uvd_power_gated == bgate)
return 0;
data->uvd_power_gated = bgate;
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
tonga_update_uvd_dpm(hwmgr, true);
tonga_phm_powerdown_uvd(hwmgr);
} else {
tonga_phm_powerup_uvd(hwmgr);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
tonga_update_uvd_dpm(hwmgr, false);
}
return 0;
}
int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
struct phm_set_power_state_input states;
const struct pp_power_state *pcurrent;
struct pp_power_state *requested;
pcurrent = hwmgr->current_ps;
requested = hwmgr->request_ps;
states.pcurrent_state = &(pcurrent->hardware);
states.pnew_state = &(requested->hardware);
if (phm_cf_want_vce_power_gating(hwmgr)) {
if (data->vce_power_gated != bgate) {
if (bgate) {
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
tonga_enable_disable_vce_dpm(hwmgr, false);
data->vce_power_gated = true;
} else {
tonga_phm_powerup_vce(hwmgr);
data->vce_power_gated = false;
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
tonga_update_vce_dpm(hwmgr, &states);
tonga_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
}
} else {
tonga_update_vce_dpm(hwmgr, &states);
tonga_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
if (!data->vce_power_gated)
tonga_update_vce_dpm(hwmgr, &states);
return 0;
}
int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
PPSMC_Msg msg;
uint32_t value;
switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
case PP_GROUP_GFX:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
case PP_BLOCK_GFX_CG:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_CGCG_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_GFX_CGLS_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_GFX_MG:
/* For GFX MGCG, there are three different ones;
* CPF, RLC, and all others. CPF MGCG will not be used for Tonga.
* For GFX MGLS, Tonga will not support it.
* */
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
default:
return -1;
}
break;
case PP_GROUP_SYS:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
case PP_BLOCK_SYS_BIF:
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_BIF_MGLS_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_MC:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_MC_MGCG_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_MC_MGLS_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_HDP:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_HDP_MGCG_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_HDP_MGLS_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_SDMA:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_SDMA_MGCG_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_SDMA_MGLS_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
case PP_BLOCK_SYS_ROM:
if (PP_STATE_SUPPORT_CG & *msg_id) {
msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
? PPSMC_MSG_EnableClockGatingFeature
: PPSMC_MSG_DisableClockGatingFeature;
value = CG_SYS_ROM_MASK;
if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
return -1;
}
break;
default:
return -1;
}
break;
default:
return -1;
}
return 0;
}
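
Editor's note: the removed tonga_phm_update_clock_gatings() unpacks a packed msg_id into group, block and CG/LS state bits before choosing between the Enable/DisableClockGatingFeature messages. The actual PP_GROUP_/PP_BLOCK_/PP_STATE_ mask values are not shown in this diff, so the layout in the sketch below is purely hypothetical; it only illustrates the mask-and-shift decode style, not the real bit positions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: [group:8][block:8][support:8][state:8].
 * The real PP_* masks and shifts live in the powerplay headers. */
#define MY_GROUP_MASK  0xff000000u
#define MY_GROUP_SHIFT 24
#define MY_BLOCK_MASK  0x00ff0000u
#define MY_BLOCK_SHIFT 16
#define MY_SUPPORT_CG  0x00000100u
#define MY_SUPPORT_LS  0x00000200u
#define MY_STATE_CG    0x00000001u
#define MY_STATE_LS    0x00000002u

static void decode(uint32_t msg_id)
{
	unsigned int group = (msg_id & MY_GROUP_MASK) >> MY_GROUP_SHIFT;
	unsigned int block = (msg_id & MY_BLOCK_MASK) >> MY_BLOCK_SHIFT;

	printf("group=%u block=%u", group, block);
	if (msg_id & MY_SUPPORT_CG)
		printf(" CG:%s", (msg_id & MY_STATE_CG) ? "enable" : "disable");
	if (msg_id & MY_SUPPORT_LS)
		printf(" LS:%s", (msg_id & MY_STATE_LS) ? "enable" : "disable");
	printf("\n");
}

int main(void)
{
	decode(0x02010101u);	/* made-up id: group 2, block 1, CG supported+enabled */
	return 0;
}
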


@ -1,107 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef TONGA_DYN_DEFAULTS_H
#define TONGA_DYN_DEFAULTS_H
/** \file
* Volcanic Islands Dynamic default parameters.
*/
enum TONGAdpm_TrendDetection {
TONGAdpm_TrendDetection_AUTO,
TONGAdpm_TrendDetection_UP,
TONGAdpm_TrendDetection_DOWN
};
typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
/* Bit vector representing same fields as hardware register. */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
/* HDP_busy */
/* IH_busy */
/* DRM_busy */
/* DRMDMA_busy */
/* UVD_busy */
/* VCE_busy */
/* ACP_busy */
/* SAMU_busy */
/* AVP_busy */
/* SDMA enabled */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
/* SH_Gfx_busy */
/* RB_Gfx_busy */
/* VCE_busy */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
/* FE_Gfx_busy */
/* RB_Gfx_busy */
/* ACP_busy */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
/* FE_Gfx_busy */
/* SH_Gfx_busy */
/* UVD_busy */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
/* VCE_busy */
/* ACP_busy */
/* SAMU_busy */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
/* thermal protection counter (units).*/
#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
/* static screen threshold unit */
#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
/* static screen threshold */
#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
/* gfx idle clock stop threshold */
#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
/* Fixed reference divider to use when building baby stepping tables. */
#define PPTONGA_REFERENCEDIVIDER_DFLT 4
/*
* ULV voltage change delay time
* Used to be delay_vreg in N.I. split for S.I.
* Using N.I. delay_vreg value as default
* ReferenceClock = 2700
* VoltageResponseTime = 1000
* VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
*/
#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
#endif
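
Editor's note: the ULV comment in the (removed) header above derives its default of 1687 from VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600. A trivial standalone check of that arithmetic, using only the values given in the comment itself:

#include <stdio.h>

int main(void)
{
	unsigned int reference_clock = 2700;		/* from the comment above */
	unsigned int voltage_response_time = 1000;	/* from the comment above */

	/* (1000 * 2700) / 1600 = 1687 with integer truncation */
	unsigned int vddc_delay = (voltage_response_time * reference_clock) / 1600;

	printf("PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT = %u\n", vddc_delay);
	return 0;
}
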

File diff suppressed because it is too large.


@ -1,402 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef TONGA_HWMGR_H
#define TONGA_HWMGR_H
#include "hwmgr.h"
#include "smu72_discrete.h"
#include "ppatomctrl.h"
#include "ppinterrupt.h"
#include "tonga_powertune.h"
#include "pp_endian.h"
#define TONGA_MAX_HARDWARE_POWERLEVELS 2
#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
struct tonga_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
struct _phw_tonga_bacos {
uint32_t best_match;
uint32_t baco_flags;
struct tonga_performance_level performance_level;
};
typedef struct _phw_tonga_bacos phw_tonga_bacos;
struct _phw_tonga_uvd_clocks {
uint32_t VCLK;
uint32_t DCLK;
};
typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
struct _phw_tonga_vce_clocks {
uint32_t EVCLK;
uint32_t ECCLK;
};
typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
struct tonga_power_state {
uint32_t magic;
phw_tonga_uvd_clocks uvd_clocks;
phw_tonga_vce_clocks vce_clocks;
uint32_t sam_clk;
uint32_t acp_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
};
struct _phw_tonga_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
#define TONGA_MINIMUM_ENGINE_CLOCK 2500
struct tonga_single_dpm_table {
uint32_t count;
phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
struct tonga_dpm_table {
struct tonga_single_dpm_table sclk_table;
struct tonga_single_dpm_table mclk_table;
struct tonga_single_dpm_table pcie_speed_table;
struct tonga_single_dpm_table vddc_table;
struct tonga_single_dpm_table vdd_gfx_table;
struct tonga_single_dpm_table vdd_ci_table;
struct tonga_single_dpm_table mvdd_table;
};
typedef struct _phw_tonga_dpm_table phw_tonga_dpm_table;
struct _phw_tonga_clock_regisiters {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
uint32_t vCG_SPLL_FUNC_CNTL_4;
uint32_t vCG_SPLL_SPREAD_SPECTRUM;
uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
uint32_t vDLL_CNTL;
uint32_t vMCLK_PWRMGT_CNTL;
uint32_t vMPLL_AD_FUNC_CNTL;
uint32_t vMPLL_DQ_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
uint32_t vMPLL_SS1;
uint32_t vMPLL_SS2;
};
typedef struct _phw_tonga_clock_regisiters phw_tonga_clock_registers;
struct _phw_tonga_voltage_smio_registers {
uint32_t vs0_vid_lower_smio_cntl;
};
typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
struct _phw_tonga_mc_reg_entry {
uint32_t mclk_max;
uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
struct _phw_tonga_mc_reg_table {
uint8_t last; /* number of registers*/
uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
/*Ultra Low Voltage parameter structure */
struct _phw_tonga_ulv_parm{
bool ulv_supported;
uint32_t ch_ulv_parameter;
uint32_t ulv_volt_change_delay;
struct tonga_performance_level ulv_power_level;
};
typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
#define TONGA_MAX_LEAKAGE_COUNT 8
struct _phw_tonga_leakage_voltage {
uint16_t count;
uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT];
uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
};
typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
struct _phw_tonga_display_timing {
uint32_t min_clock_insr;
uint32_t num_existing_displays;
};
typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
struct _phw_tonga_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
uint32_t samu_dpm_enable_mask;
uint32_t sclk_dpm_enable_mask;
uint32_t mclk_dpm_enable_mask;
uint32_t pcie_dpm_enable_mask;
};
typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
struct _phw_tonga_pcie_perf_range {
uint16_t max;
uint16_t min;
};
typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
struct _phw_tonga_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
uint16_t vddgfx_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
/* We need to review which fields are needed. */
/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
struct tonga_hwmgr {
struct tonga_dpm_table dpm_table;
struct tonga_dpm_table golden_dpm_table;
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
uint32_t voting_rights_clients3;
uint32_t voting_rights_clients4;
uint32_t voting_rights_clients5;
uint32_t voting_rights_clients6;
uint32_t voting_rights_clients7;
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
uint32_t vdd_gfx_control;
uint32_t vddc_vddci_delta;
uint32_t vddc_vddgfx_delta;
struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
struct pp_interrupt_registration_info smc_to_host_interrupt_info;
uint32_t active_auto_throttle_sources;
struct pp_interrupt_registration_info external_throttle_interrupt;
irq_handler_func_t external_throttle_callback;
void *external_throttle_context;
struct pp_interrupt_registration_info ctf_interrupt_info;
irq_handler_func_t ctf_callback;
void *ctf_context;
phw_tonga_clock_registers clock_registers;
phw_tonga_voltage_smio_registers voltage_smio_registers;
bool is_memory_GDDR5;
uint16_t acpi_vddc;
bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
uint32_t mvdd_control;
uint32_t vddc_mask_low;
uint32_t mvdd_mask_low;
uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
uint16_t min_vddc_in_pp_table;
uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
uint16_t min_vddci_in_pp_table;
uint32_t mclk_strobe_mode_threshold;
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edc_wr_enable_threshold;
bool is_uvd_enabled;
bool is_xdma_enabled;
phw_tonga_vbios_boot_state vbios_boot_state;
bool battery_state;
bool is_tlu_enabled;
bool pcie_performance_request;
/* -------------- SMC SRAM Address of firmware header tables ----------------*/
uint32_t sram_end; /* The first address after the SMC SRAM. */
uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
SMU72_Discrete_MCRegisters mc_reg_table;
SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
/* -------------- Stuff originally coming from Evergreen --------------------*/
phw_tonga_mc_reg_table tonga_mc_reg_table;
uint32_t vdd_ci_control;
pp_atomctrl_voltage_table vddc_voltage_table;
pp_atomctrl_voltage_table vddci_voltage_table;
pp_atomctrl_voltage_table vddgfx_voltage_table;
pp_atomctrl_voltage_table mvdd_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
uint32_t gpio_debug;
uint32_t mc_micro_code_feature;
uint32_t highest_mclk;
uint16_t acpi_vdd_ci;
uint8_t mvdd_high_index;
uint8_t mvdd_low_index;
bool dll_defaule_on;
bool performance_request_registered;
/* ----------------- Low Power Features ---------------------*/
phw_tonga_bacos bacos;
phw_tonga_ulv_parm ulv;
/* ----------------- CAC Stuff ---------------------*/
uint32_t cac_table_start;
bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
bool cac_enabled;
/* ----------------- DPM2 Parameters ---------------------*/
uint32_t power_containment_features;
bool enable_bapm_feature;
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
struct tonga_pt_defaults *power_tune_defaults;
SMU72_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
bool enable_dte_feature;
/* ----------------- Phase Shedding ---------------------*/
bool vddc_phase_shed_control;
/* --------------------- DI/DT --------------------------*/
phw_tonga_display_timing display_timing;
/* --------- ReadRegistry data for memory and engine clock margins ---- */
uint32_t engine_clock_data;
uint32_t memory_clock_data;
/* -------- Thermal Temperature Setting --------------*/
phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks; /* used to store the previous dal min sclock */
phw_tonga_pcie_perf_range pcie_gen_performance;
phw_tonga_pcie_perf_range pcie_lane_performance;
phw_tonga_pcie_perf_range pcie_gen_power_saving;
phw_tonga_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
uint32_t mclk_activity_target;
uint32_t low_sclk_interrupt_threshold;
uint32_t last_mclk_dpm_enable_mask;
bool uvd_enabled;
uint32_t pcc_monitor_enabled;
/* --------- Power Gating States ------------*/
bool uvd_power_gated; /* 1: gated, 0:not gated */
bool vce_power_gated; /* 1: gated, 0:not gated */
bool samu_power_gated; /* 1: gated, 0:not gated */
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
};
typedef struct tonga_hwmgr tonga_hwmgr;
#define TONGA_DPM2_NEAR_TDP_DEC 10
#define TONGA_DPM2_ABOVE_SAFE_INC 5
#define TONGA_DPM2_BELOW_SAFE_INC 20
#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size is 128, then this value should be Log2(128) = 7. */
#define TONGA_DPM2_LTS_TRUNCATE 0
#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */
#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */
#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */
#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50
#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12
#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
#define TONGA_VOLTAGE_CONTROL_NONE 0x0
#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1
#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2
#define TONGA_VOLTAGE_CONTROL_MERGED 0x3
#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */
#define TONGA_UNUSED_GPIO_PIN 0x7F
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
#endif


@ -1,495 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "smumgr.h"
#include "tonga_hwmgr.h"
#include "tonga_powertune.h"
#include "tonga_smumgr.h"
#include "smu72_discrete.h"
#include "pp_debug.h"
#include "tonga_ppsmc.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
{1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
{0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
};
void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint32_t tmp = 0;
if (table_info &&
table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
table_info->cac_dtp_table->usPowerTuneDataSetID)
tonga_hwmgr->power_tune_defaults =
&tonga_power_tune_data_set_array
[table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
else
tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
tonga_hwmgr->dte_tj_offset = tmp;
if (!tmp) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
tonga_hwmgr->fast_watermark_threshold = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
tmp = 1;
tonga_hwmgr->enable_dte_feature = tmp ? false : true;
tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
}
}
}
int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
struct tonga_pt_defaults *defaults = data->power_tune_defaults;
SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
int i, j, k;
uint16_t *pdef1;
uint16_t *pdef2;
/* TDP number of fraction bits are changed from 8 to 7 for Fiji
* as requested by SMC team
*/
dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
(uint16_t)(cac_dtp_table->usTDP * 256));
dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
(uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
"Target Operating Temp is out of Range!",
);
dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
dpm_table->GpuTjHyst = 8;
dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
pdef1 = defaults->bapmti_r;
pdef2 = defaults->bapmti_rc;
for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
for (j = 0; j < SMU72_DTE_SOURCES; j++) {
for (k = 0; k < SMU72_DTE_SINKS; k++) {
dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
pdef1++;
pdef2++;
}
}
}
return 0;
}
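
Editor's note: tonga_populate_bapm_parameters_in_dpm_table() packs TDP values with a fixed binary point (the * 256 scaling) and converts each field with PP_HOST_TO_SMC_US/_UL before handing it to the SMC. The sketch below mirrors that packing with a hand-written byte swap standing in for PP_HOST_TO_SMC_US, assuming, as the pp_endian helper names suggest, that the SMC side is big-endian; the 150 W TDP is a made-up sample value.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for PP_HOST_TO_SMC_US under the big-endian-SMC assumption. */
static uint16_t host_to_smc_us(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t tdp_watts = 150;				/* hypothetical usTDP */
	uint16_t tdp_fixed = (uint16_t)(tdp_watts * 256);	/* 8 fractional bits */

	printf("usTDP=%u  fixed-point=0x%04x  as sent to SMC=0x%04x\n",
	       (unsigned int)tdp_watts, (unsigned int)tdp_fixed,
	       (unsigned int)host_to_smc_us(tdp_fixed));
	return 0;
}
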
static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
data->power_tune_table.SviLoadLineTrimVddC = 3;
data->power_tune_table.SviLoadLineOffsetVddC = 0;
return 0;
}
static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
/* TDC number of fraction bits are changed from 8 to 7
* for Fiji as requested by SMC team
*/
tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
data->power_tune_table.TDC_VDDC_PkgLimit =
CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
defaults->tdc_vddc_throttle_release_limit_perc;
data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
return 0;
}
static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
const struct tonga_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
if (tonga_read_smc_sram_dword(hwmgr->smumgr,
fuse_table_offset +
offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
return -EINVAL);
else
data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
return 0;
}
static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.LPMLTemperatureScaler[i] = 0;
return 0;
}
static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
if ((hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity & (1 << 15)) ||
(hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity = hwmgr->thermal_controller.
advanceFanControlParameters.usDefaultFanOutputSensitivity;
data->power_tune_table.FuzzyFan_PwmSetDelta =
PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
advanceFanControlParameters.usFanOutputSensitivity);
return 0;
}
static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
data->power_tune_table.GnbLPML[i] = 0;
return 0;
}
static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
data->power_tune_table.BapmVddCBaseLeakageHiSidd =
CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
data->power_tune_table.BapmVddCBaseLeakageLoSidd =
CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
return 0;
}
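
Editor's note: tonga_populate_bapm_vddc_base_leakage_sidd() converts the pptable CAC leakage values with usHighCACLeakage / 100 * 256; because this is integer math, the divide-by-100 truncates before the * 256 scaling is applied. A small standalone illustration with invented leakage values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t high_cac_leakage = 1250;	/* hypothetical usHighCACLeakage */
	uint16_t low_cac_leakage  = 475;	/* hypothetical usLowCACLeakage */

	/* Same expression as the driver: /100 truncates first, then *256 scales. */
	uint16_t hi_sidd = (uint16_t)(high_cac_leakage / 100 * 256);
	uint16_t lo_sidd = (uint16_t)(low_cac_leakage / 100 * 256);

	/* Note that 475 / 100 truncates to 4 before scaling. */
	printf("hi_sidd=%u (0x%04x)  lo_sidd=%u (0x%04x)\n",
	       (unsigned int)hi_sidd, (unsigned int)hi_sidd,
	       (unsigned int)lo_sidd, (unsigned int)lo_sidd);
	return 0;
}
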
int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (tonga_read_smc_sram_dword(hwmgr->smumgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to get pm_fuse_table_offset Failed!",
return -EINVAL);
/* DW6 */
if (tonga_populate_svi_load_line(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate SviLoadLine Failed!",
return -EINVAL);
/* DW7 */
if (tonga_populate_tdc_limit(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TDCLimit Failed!", return -EINVAL);
/* DW8 */
if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate TdcWaterfallCtl Failed !",
return -EINVAL);
/* DW9-DW12 */
if (tonga_populate_temperature_scaler(hwmgr) != 0)
PP_ASSERT_WITH_CODE(false,
"Attempt to populate LPMLTemperatureScaler Failed!",
return -EINVAL);
/* DW13-DW14 */
if (tonga_populate_fuzzy_fan(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate Fuzzy Fan Control parameters Failed!",
return -EINVAL);
/* DW15-DW18 */
if (tonga_populate_gnb_lpml(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
/* DW19 */
if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate GnbLPML Min and Max Vid Failed!",
return -EINVAL);
/* DW20 */
if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
return -EINVAL);
if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
(uint8_t *)&data->power_tune_table,
sizeof(struct SMU72_Discrete_PmFuses), data->sram_end))
PP_ASSERT_WITH_CODE(false,
"Attempt to download PmFuseTable Failed!",
return -EINVAL);
}
return 0;
}
int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to enable CAC in SMC.", result = -1);
data->cac_enabled = (smc_result == 0) ? true : false;
}
return result;
}
int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC) && data->cac_enabled) {
int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_DisableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
data->cac_enabled = false;
}
return result;
}
int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
int tonga_enable_power_containment(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int smc_result;
int result = 0;
data->power_containment_features = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
if (data->enable_dte_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_EnableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to enable DTE in SMC.", result = -1;);
if (smc_result == 0)
data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
}
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (smc_result == 0)
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_TDCLimit;
}
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (smc_result == 0) {
struct phm_cac_tdp_table *cac_table =
table_info->cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
if (tonga_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
}
return result;
}
int tonga_disable_power_containment(struct pp_hwmgr *hwmgr)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment) &&
data->power_containment_features) {
int smc_result;
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_TDCLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
result = smc_result);
}
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_DisableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
result = smc_result);
}
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
result = smc_result);
}
data->power_containment_features = 0;
}
return result;
}
int tonga_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
int adjust_percent, target_tdp;
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
/* The SMC expects target_tdp as a 7-bit fraction in the DPM table,
* but as an 8-bit fraction in messages.
*/
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
}
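To make the fixed-point handling above concrete, here is a standalone sketch of the target_tdp computation from tonga_power_control_set_level(): the signed adjustment percentage is applied first, and the result is expressed in 1/256 W units for PPSMC_MSG_OverDriveSetTargetTdp. The 180 W TDP and +10% adjustment are illustrative values only.

#include <stdio.h>

int main(void)
{
        int us_tdp = 180;               /* watts, illustrative */
        int adjust_percent = 10;        /* +10%, illustrative */
        int target_tdp;

        /* same arithmetic as tonga_power_control_set_level() */
        target_tdp = ((100 + adjust_percent) * (us_tdp * 256)) / 100;

        printf("target_tdp = %d (%d.%02d W)\n", target_tdp,
               target_tdp / 256, (target_tdp % 256) * 100 / 256);
        return 0;
}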

View File

@ -1,590 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <asm/div64.h>
#include "tonga_thermal.h"
#include "tonga_hwmgr.h"
#include "tonga_smumgr.h"
#include "tonga_ppsmc.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
/**
* Get Fan Speed Control Parameters.
* @param hwmgr the address of the powerplay hardware manager.
* @param fan_speed_info the address of the structure where the result is to be placed.
* @exception Always succeeds except if we cannot zero out the output structure.
*/
int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
{
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
} else {
fan_speed_info->min_rpm = 0;
fan_speed_info->max_rpm = 0;
}
return 0;
}
/**
* Get Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed the address where the fan speed (in percent) is to be placed.
* @exception Fails if the 100% duty setting reads as 0.
*/
int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (0 == duty100)
return -EINVAL;
tmp64 = (uint64_t)duty * 100;
do_div(tmp64, duty100);
*speed = (uint32_t)tmp64;
if (*speed > 100)
*speed = 100;
return 0;
}
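The percent readback above is simply duty * 100 / duty100, clamped to 100. A user-space equivalent, with do_div() replaced by a plain 64-bit division, is sketched below; the register values are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t duty100 = 255;         /* CG_FDO_CTRL1.FMAX_DUTY100, illustrative */
        uint32_t duty = 96;             /* CG_THERMAL_STATUS.FDO_PWM_DUTY, illustrative */
        uint64_t tmp64;
        uint32_t speed;

        if (duty100 == 0)
                return 1;               /* the driver returns -EINVAL here */

        tmp64 = (uint64_t)duty * 100;
        speed = (uint32_t)(tmp64 / duty100);
        if (speed > 100)
                speed = 100;

        printf("fan speed: %u%%\n", (unsigned)speed);   /* 96 * 100 / 255 = 37 */
        return 0;
}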
/**
* Get Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the address of the structure where the result is to be placed.
* @exception Returns not supported if no fan is found or if pulses per revolution are not set
*/
int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
return 0;
}
/**
* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
* @param hwmgr the address of the powerplay hardware manager.
* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM
* @exception Should always succeed.
*/
int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
return 0;
}
/**
* Reset Fan Speed Control to default mode.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
hwmgr->fan_ctrl_is_in_default_mode = true;
}
return 0;
}
int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
/*
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
else
hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
*/
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
}
/* TODO: for some device IDs (0x692b), sending this message returns an invalid command:
if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
*/
return result;
}
int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
}
/**
* Set Fan Speed in percent.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -EINVAL;
if (speed > 100)
speed = 100;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
if (0 == duty100)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
* Reset Fan Speed to default.
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (0 == result)
result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = tonga_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set Fan Speed in RPM.
* @param hwmgr the address of the powerplay hardware manager.
* @param speed is the RPM value (min - max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
return 0;
}
/**
* Reads the remote temperature from the Tonga thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
/* Bit 9 means the reading is lower than the lowest usable value. */
if (0 != (0x200 & temp))
temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
else
temp = (temp & 0x1ff);
temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
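A standalone sketch of the CTF_TEMP decode above: bit 9 flags a reading below the lowest usable value (which the driver clamps to the maximum reading), the low 9 bits carry the temperature in degrees Celsius, and the result is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES. The value 1000 used for that constant below (millidegrees) is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_THERMAL_MAXIMUM_TEMP_READING       255
#define DEMO_TEMPERATURE_UNITS_PER_CENTIGRADES  1000    /* assumed: millidegrees */

static int decode_ctf_temp(uint32_t raw)
{
        int temp;

        if (raw & 0x200)        /* reading below the lowest usable value */
                temp = DEMO_THERMAL_MAXIMUM_TEMP_READING;
        else
                temp = raw & 0x1ff;

        return temp * DEMO_TEMPERATURE_UNITS_PER_CENTIGRADES;
}

int main(void)
{
        printf("%d\n", decode_ctf_temp(0x041));         /* 65 C -> 65000 */
        printf("%d\n", decode_ctf_temp(0x200));         /* low flag -> 255000 */
        return 0;
}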
/**
* Set the requested temperature range for high and low alert signals
*
* @param hwmgr The address of the hardware manager.
* @param low_temp, high_temp the temperature range to be programmed for the low and high alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
{
uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
low = low_temp;
if (high > high_temp)
high = high_temp;
if (low > high)
return -EINVAL;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
return 0;
}
/**
* Programs thermal controller one-time setting registers
*
* @param hwmgr The address of the hardware manager.
*/
static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_CTRL, EDGE_PER_REV,
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
return 0;
}
/**
* Enable thermal alerts on the Tonga thermal controller.
*
* @param hwmgr The address of the hardware manager.
*/
static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
}
/**
* Disable thermal alerts on the Tonga thermal controller.
* @param hwmgr The address of the hardware manager.
*/
static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
}
/**
* Uninitialize the thermal controller.
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = tonga_thermal_disable_alert(hwmgr);
if (hwmgr->thermal_controller.fanInfo.bNoFan)
tonga_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
* Set up the fan table to control the fan using the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return always 0 (fan table upload errors are currently ignored)
*/
int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
uint32_t reference_clock;
int res;
uint64_t tmp64;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
return 0;
if (0 == data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
if (0 == duty100) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
do_div(tmp64, 10000);
fdo_min = (uint16_t)tmp64;
t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
fan_table.FdoMin = cpu_to_be16(fdo_min);
fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
fan_table.HystUp = cpu_to_be16(1);
fan_table.HystSlope = cpu_to_be16(1);
fan_table.TempRespLim = cpu_to_be16(5);
reference_clock = tonga_get_xclk(hwmgr);
fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
fan_table.FanControl_GL_Flag = 1;
res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
/* TODO: for some device IDs (0x692b), sending this message returns an invalid command:
if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
if (0 != res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
*/
return 0;
}
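The fan table arithmetic above packs the PWM-vs-temperature curve into two slopes plus a minimum duty cycle. The standalone sketch below reproduces that arithmetic with illustrative fan parameters (PWM in 0.01 percent units, temperatures in 0.01 degree C units); none of the numbers come from a real pptable.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t duty100 = 255;                 /* FMAX_DUTY100, illustrative */
        uint32_t pwm_min = 1000, pwm_med = 4000, pwm_high = 8000;
        uint32_t t_min = 4000, t_med = 6000, t_high = 9000;
        uint64_t tmp64;
        uint16_t fdo_min, slope1, slope2;

        tmp64 = (uint64_t)pwm_min * duty100;
        fdo_min = (uint16_t)(tmp64 / 10000);

        slope1 = (uint16_t)((50 + ((16 * duty100 * (pwm_med - pwm_min)) /
                                   (t_med - t_min))) / 100);
        slope2 = (uint16_t)((50 + ((16 * duty100 * (pwm_high - pwm_med)) /
                                   (t_high - t_med))) / 100);

        printf("FdoMin=%u Slope1=%u Slope2=%u TempMin=%u\n",
               (unsigned)fdo_min, (unsigned)slope1, (unsigned)slope2,
               (unsigned)((50 + t_min) / 100));
        return 0;
}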
/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return always 0
*/
int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
/* If the fan table setup has failed, we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
tonga_fan_ctrl_start_smc_fan_control(hwmgr);
tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
}
/**
* Set temperature range for high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from set temperature range routine
*/
int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
if (range == NULL)
return -EINVAL;
return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
* Programs one-time setting registers
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
return tonga_thermal_initialize(hwmgr);
}
/**
* Enable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from enable alert routine
*/
int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
return tonga_thermal_enable_alert(hwmgr);
}
/**
* Disable high and low alerts
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
* @param pOutput the pointer to output data
* @param pStorage the pointer to temporary storage
* @param Result the last failure code
* @return result from disable alert routine
*/
static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
{
return tonga_thermal_disable_alert(hwmgr);
}
static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
{ NULL, tf_tonga_thermal_initialize },
{ NULL, tf_tonga_thermal_set_temperature_range },
{ NULL, tf_tonga_thermal_enable_alert },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
{ NULL, tf_tonga_thermal_setup_fan_table},
{ NULL, tf_tonga_thermal_start_smc_fan_control},
{ NULL, NULL }
};
static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
tonga_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
{ NULL, tf_tonga_thermal_disable_alert},
{ NULL, tf_tonga_thermal_set_temperature_range},
{ NULL, tf_tonga_thermal_enable_alert},
{ NULL, NULL }
};
static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
tonga_thermal_set_temperature_range_master_list
};
int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
tonga_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
/**
* Initializes the thermal controller related functions in the Hardware Manager structure.
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
if (0 == result) {
result = phm_construct_table(hwmgr,
&tonga_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (0 != result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
}
if (0 == result)
hwmgr->fan_ctrl_is_in_default_mode = true;
return result;
}
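The two master tables above use a simple NULL-terminated dispatch pattern: phm_construct_table() records the list and the runner later walks it in order, stopping at the terminating { NULL, NULL } entry. The mock below only illustrates that shape; the real implementation lives in the powerplay hardware manager and passes hwmgr/input/output/storage arguments to each table function.

#include <stdio.h>

typedef int (*demo_table_func)(void);

struct demo_master_table_item {
        const char *check;              /* stands in for the unused check pointer */
        demo_table_func func;
};

static int step_initialize(void)   { puts("initialize");   return 0; }
static int step_set_range(void)    { puts("set range");    return 0; }
static int step_enable_alert(void) { puts("enable alert"); return 0; }

static const struct demo_master_table_item demo_list[] = {
        { NULL, step_initialize },
        { NULL, step_set_range },
        { NULL, step_enable_alert },
        { NULL, NULL }
};

int main(void)
{
        const struct demo_master_table_item *item;

        for (item = demo_list; item->func != NULL; item++)
                if (item->func())
                        return 1;       /* stop on the first failing step */
        return 0;
}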

View File

@ -1,61 +0,0 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef TONGA_THERMAL_H
#define TONGA_THERMAL_H
#include "hwmgr.h"
#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
#define TONGA_THERMAL_LOW_ALERT_MASK 0x2
#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255
#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif

View File

@ -29,6 +29,19 @@
#include "amd_shared.h" #include "amd_shared.h"
#include "cgs_common.h" #include "cgs_common.h"
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
AMDGPU_PP_SENSOR_GPU_LOAD,
AMDGPU_PP_SENSOR_GFX_MCLK,
AMDGPU_PP_SENSOR_GPU_TEMP,
AMDGPU_PP_SENSOR_VCE_POWER,
AMDGPU_PP_SENSOR_UVD_POWER,
};
enum amd_pp_event { enum amd_pp_event {
AMD_PP_EVENT_INITIALIZE = 0, AMD_PP_EVENT_INITIALIZE = 0,
@ -260,6 +273,7 @@ enum amd_pp_clock_type {
struct amd_pp_clocks { struct amd_pp_clocks {
uint32_t count; uint32_t count;
uint32_t clock[MAX_NUM_CLOCKS]; uint32_t clock[MAX_NUM_CLOCKS];
uint32_t latency[MAX_NUM_CLOCKS];
}; };
@ -331,8 +345,6 @@ struct amd_powerplay_funcs {
int (*powergate_uvd)(void *handle, bool gate); int (*powergate_uvd)(void *handle, bool gate);
int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id, int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
void *input, void *output); void *input, void *output);
void (*print_current_performance_level)(void *handle,
struct seq_file *m);
int (*set_fan_control_mode)(void *handle, uint32_t mode); int (*set_fan_control_mode)(void *handle, uint32_t mode);
int (*get_fan_control_mode)(void *handle); int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent); int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@ -346,6 +358,7 @@ struct amd_powerplay_funcs {
int (*set_sclk_od)(void *handle, uint32_t value); int (*set_sclk_od)(void *handle, uint32_t value);
int (*get_mclk_od)(void *handle); int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value); int (*set_mclk_od)(void *handle, uint32_t value);
int (*read_sensor)(void *handle, int idx, int32_t *value);
}; };
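The new read_sensor hook replaces the removed print_current_performance_level path with a generic value query keyed by the amd_pp_sensors enum above. The stub below is only a user-space mock of that calling convention; the fake handle, the local enum copy, and the 67 C reading are illustrative, not the real amdgpu wiring.

#include <stdint.h>
#include <stdio.h>

enum demo_pp_sensors {
        DEMO_PP_SENSOR_GFX_SCLK = 0,
        DEMO_PP_SENSOR_GPU_TEMP = 8,    /* same slot as AMDGPU_PP_SENSOR_GPU_TEMP */
};

static int demo_read_sensor(void *handle, int idx, int32_t *value)
{
        (void)handle;
        if (idx == DEMO_PP_SENSOR_GPU_TEMP) {
                *value = 67 * 1000;     /* millidegrees, illustrative */
                return 0;
        }
        return -1;                      /* sensor not implemented in this mock */
}

int main(void)
{
        int32_t temp;

        if (demo_read_sensor(NULL, DEMO_PP_SENSOR_GPU_TEMP, &temp) == 0)
                printf("GPU temperature: %d millidegrees C\n", (int)temp);
        return 0;
}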
struct amd_powerplay { struct amd_powerplay {
@ -377,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle, int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output); struct amd_pp_simple_clock_info *output);
int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
#endif /* _AMD_POWERPLAY_H_ */ #endif /* _AMD_POWERPLAY_H_ */

View File

@ -311,8 +311,6 @@ struct pp_hwmgr_func {
int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr, int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state); const void *state);
void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
struct seq_file *m);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
int (*display_config_changed)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@ -359,6 +357,7 @@ struct pp_hwmgr_func {
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr); int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
}; };
struct pp_table_func { struct pp_table_func {
@ -709,6 +708,7 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage); uint32_t sclk, uint16_t id, uint16_t *voltage);

View File

@ -23,8 +23,7 @@
#ifndef _POLARIS10_PWRVIRUS_H #ifndef _POLARIS10_PWRVIRUS_H
#define _POLARIS10_PWRVIRUS_H #define _POLARIS10_PWRVIRUS_H
#define mmSMC_IND_INDEX_11 0x01AC
#define mmSMC_IND_DATA_11 0x01AD
#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a #define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b #define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c #define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c

View File

@ -21,21 +21,38 @@
* *
*/ */
#ifndef ICELAND_SMUM_H #ifndef _PP_COMMON_H
#define ICELAND_SMUM_H #define _PP_COMMON_H
#include "ppsmc.h" #include "smu7_ppsmc.h"
#include "cgs_common.h"
extern int iceland_smu_init(struct amdgpu_device *adev); #include "smu/smu_7_1_3_d.h"
extern int iceland_smu_fini(struct amdgpu_device *adev); #include "smu/smu_7_1_3_sh_mask.h"
extern int iceland_smu_start(struct amdgpu_device *adev);
#include "smu74.h"
#include "smu74_discrete.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
struct iceland_smu_private_data
{
uint8_t *header;
uint8_t *mec_image;
uint32_t header_addr_high;
uint32_t header_addr_low;
};
#endif #endif

View File

@ -0,0 +1,412 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef DGPU_VI_PP_SMC_H
#define DGPU_VI_PP_SMC_H
#pragma pack(push, 1)
#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_SWSTATE_FLAG_VCE 0x04
#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
#define PPSMC_SYSTEMFLAG_GDDR5 0x04
#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
#define PPSMC_DPM2FLAGS_OCP 0x04
#define PPSMC_DISPLAY_WATERMARK_LOW 0
#define PPSMC_DISPLAY_WATERMARK_HIGH 1
#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
#define PPSMC_STATEFLAG_POWERBOOST 0x02
#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
#define PPSMC_STATEFLAG_POWERSHIFT 0x08
#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
#define FDO_MODE_HARDWARE 0
#define FDO_MODE_PIECE_WISE_LINEAR 1
enum FAN_CONTROL {
FAN_CONTROL_FUZZY,
FAN_CONTROL_TABLE
};
#define PPSMC_Result_OK ((uint16_t)0x01)
#define PPSMC_Result_NoMore ((uint16_t)0x02)
#define PPSMC_Result_NotNow ((uint16_t)0x03)
#define PPSMC_Result_Failed ((uint16_t)0xFF)
#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
typedef uint16_t PPSMC_Result;
#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
#define PPSMC_MSG_Halt ((uint16_t)0x10)
#define PPSMC_MSG_Resume ((uint16_t)0x11)
#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
#define PPSMC_CACHistoryStart ((uint16_t)0x57)
#define PPSMC_CACHistoryStop ((uint16_t)0x58)
#define PPSMC_TDPClampingActive ((uint16_t)0x59)
#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
#define PPSMC_StartFanControl ((uint16_t)0x5B)
#define PPSMC_StopFanControl ((uint16_t)0x5C)
#define PPSMC_NoDisplay ((uint16_t)0x5D)
#define PPSMC_HasDisplay ((uint16_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
#define PPSMC_OCPActive ((uint16_t)0x6C)
#define PPSMC_OCPInactive ((uint16_t)0x6D)
#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
#define PPSMC_FlushDataCache ((uint16_t)0x80)
#define PPSMC_FlushInstrCache ((uint16_t)0x81)
#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
#define PPSMC_MSG_Test ((uint16_t) 0x100)
#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
#define PPSMC_MSG_GetData ((uint16_t) 0x801)
#define PPSMC_MSG_SetData ((uint16_t) 0x802)
typedef uint16_t PPSMC_Msg;
#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
#define PPSMC_EVENT_STATUS_DC 0x00000004
#pragma pack(pop)
#endif
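As a quick illustration of the result-code convention in this header: the error codes (PPSMC_Result_Failed, UnknownCmd, UnknownVT) all have bit 7 set while the success and "not now" codes do not, so PPSMC_isERROR() reduces to a single bit test. A tiny standalone check using a local copy of the macro:

#include <stdint.h>
#include <stdio.h>

#define DEMO_isERROR(x) ((uint16_t)0x80 & (x))

int main(void)
{
        uint16_t ok = 0x01, not_now = 0x03, failed = 0xFF;

        printf("OK:     %s\n", DEMO_isERROR(ok)      ? "error" : "no error");
        printf("NotNow: %s\n", DEMO_isERROR(not_now) ? "error" : "no error");
        printf("Failed: %s\n", DEMO_isERROR(failed)  ? "error" : "no error");
        return 0;
}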

View File

@ -28,6 +28,7 @@
struct pp_smumgr; struct pp_smumgr;
struct pp_instance; struct pp_instance;
struct pp_hwmgr;
#define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS {
AVFS_BTC_SMUMSG_ERROR AVFS_BTC_SMUMSG_ERROR
}; };
enum SMU_TABLE {
SMU_UVD_TABLE = 0,
SMU_VCE_TABLE,
SMU_SAMU_TABLE,
SMU_BIF_TABLE,
};
enum SMU_TYPE {
SMU_SoftRegisters = 0,
SMU_Discrete_DpmTable,
};
enum SMU_MEMBER {
HandshakeDisables = 0,
VoltageChangeTimeout,
AverageGraphicsActivity,
PreVBlankGap,
VBlankTimeout,
UcodeLoadStatus,
UvdBootLevel,
VceBootLevel,
SamuBootLevel,
LowSclkInterruptThreshold,
};
enum SMU_MAC_DEFINITION {
SMU_MAX_LEVELS_GRAPHICS = 0,
SMU_MAX_LEVELS_MEMORY,
SMU_MAX_LEVELS_LINK,
SMU_MAX_ENTRIES_SMIO,
SMU_MAX_LEVELS_VDDC,
SMU_MAX_LEVELS_VDDGFX,
SMU_MAX_LEVELS_VDDCI,
SMU_MAX_LEVELS_MVDD,
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
struct pp_smumgr_func { struct pp_smumgr_func {
int (*smu_init)(struct pp_smumgr *smumgr); int (*smu_init)(struct pp_smumgr *smumgr);
int (*smu_fini)(struct pp_smumgr *smumgr); int (*smu_fini)(struct pp_smumgr *smumgr);
@ -69,6 +109,18 @@ struct pp_smumgr_func {
int (*download_pptable_settings)(struct pp_smumgr *smumgr, int (*download_pptable_settings)(struct pp_smumgr *smumgr,
void **table); void **table);
int (*upload_pptable_settings)(struct pp_smumgr *smumgr); int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
int (*init_smc_table)(struct pp_hwmgr *hwmgr);
int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
uint32_t (*get_mac_definition)(uint32_t value);
bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
}; };
struct pp_smumgr { struct pp_smumgr {
@ -127,6 +179,24 @@ extern int tonga_smum_init(struct pp_smumgr *smumgr);
extern int fiji_smum_init(struct pp_smumgr *smumgr); extern int fiji_smum_init(struct pp_smumgr *smumgr);
extern int polaris10_smum_init(struct pp_smumgr *smumgr); extern int polaris10_smum_init(struct pp_smumgr *smumgr);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result);
extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result);
extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
uint32_t type, uint32_t member);
extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
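These smum_*() wrappers forward to the per-ASIC callbacks newly added to pp_smumgr_func (implemented in fiji_smc.c, tonga_smc.c, polaris10_smc.c and iceland_smc.c). The mock below sketches only that wrapper-over-callback-table shape; the struct and field names (demo_smumgr, funcs) and the returned offset are placeholders, not the real struct pp_smumgr layout.

#include <stdio.h>

struct demo_smumgr_funcs {
        unsigned int (*get_offsetof)(unsigned int type, unsigned int member);
};

struct demo_smumgr {
        const struct demo_smumgr_funcs *funcs;
};

static unsigned int demo_asic_get_offsetof(unsigned int type, unsigned int member)
{
        /* a real backend returns offsets into its SMU firmware structures */
        return type * 0x100 + member * 4;       /* dummy value */
}

static unsigned int demo_smum_get_offsetof(struct demo_smumgr *smumgr,
                                           unsigned int type, unsigned int member)
{
        if (smumgr->funcs && smumgr->funcs->get_offsetof)
                return smumgr->funcs->get_offsetof(type, member);
        return 0;
}

int main(void)
{
        static const struct demo_smumgr_funcs funcs = {
                .get_offsetof = demo_asic_get_offsetof,
        };
        struct demo_smumgr smumgr = { .funcs = &funcs };

        printf("offset: 0x%x\n", demo_smum_get_offsetof(&smumgr, 1, 7));
        return 0;
}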
#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK

View File

@ -2,8 +2,9 @@
# Makefile for the 'smu manager' sub-component of powerplay. # Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver. # It provides the smu management services for the driver.
SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
polaris10_smumgr.o iceland_smumgr.o polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
smu7_smumgr.o iceland_smc.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2014 Advanced Micro Devices, Inc. * Copyright 2015 Advanced Micro Devices, Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@ -20,23 +20,32 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#ifndef FIJI_SMC_H
#define FIJI_SMC_H
#ifndef FIJI_SMUMGR_H #include "smumgr.h"
#define FIJI_SMUMGR_H #include "smu73.h"
#include "fiji_ppsmc.h" struct fiji_pt_defaults {
uint8_t SviLoadLineEn;
int fiji_smu_init(struct amdgpu_device *adev); uint8_t SviLoadLineVddC;
int fiji_smu_fini(struct amdgpu_device *adev); uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
int fiji_smu_start(struct amdgpu_device *adev); uint8_t TDC_MAWt;
uint8_t TdcWaterfallCtl;
struct fiji_smu_private_data uint8_t DTEAmbientTempBase;
{
uint8_t *header;
uint32_t smu_buffer_addr_high;
uint32_t smu_buffer_addr_low;
uint32_t header_addr_high;
uint32_t header_addr_low;
}; };
int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
uint32_t fiji_get_mac_definition(uint32_t value);
int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
#endif #endif

drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c Normal file → Executable file
View File

@ -38,6 +38,7 @@
#include "bif/bif_5_0_sh_mask.h" #include "bif/bif_5_0_sh_mask.h"
#include "pp_debug.h" #include "pp_debug.h"
#include "fiji_pwrvirus.h" #include "fiji_pwrvirus.h"
#include "fiji_smc.h"
#define AVFS_EN_MSB 1568 #define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568 #define AVFS_EN_LSB 1568
@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
}; };
static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
{
enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
switch (fw_type) {
case UCODE_ID_SMU:
result = CGS_UCODE_ID_SMU;
break;
case UCODE_ID_SDMA0:
result = CGS_UCODE_ID_SDMA0;
break;
case UCODE_ID_SDMA1:
result = CGS_UCODE_ID_SDMA1;
break;
case UCODE_ID_CP_CE:
result = CGS_UCODE_ID_CP_CE;
break;
case UCODE_ID_CP_PFP:
result = CGS_UCODE_ID_CP_PFP;
break;
case UCODE_ID_CP_ME:
result = CGS_UCODE_ID_CP_ME;
break;
case UCODE_ID_CP_MEC:
result = CGS_UCODE_ID_CP_MEC;
break;
case UCODE_ID_CP_MEC_JT1:
result = CGS_UCODE_ID_CP_MEC_JT1;
break;
case UCODE_ID_CP_MEC_JT2:
result = CGS_UCODE_ID_CP_MEC_JT2;
break;
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
default:
break;
}
return result;
}
/**
* Set the address for reading/writing the SMC SRAM space.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
*/
static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
"SMC address must be 4 byte aligned.", return -EINVAL;);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
"SMC address is beyond the SMC RAM area.", return -EINVAL;);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
return 0;
}
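The removed fiji_set_smc_sram_address() above programs the SMC_IND_INDEX_0/SMC_IND_DATA_0 indirect-access pair before any SRAM read or write. A minimal standalone sketch of that index/data pattern, with the register and the SRAM simulated in host memory (the sizes and addresses are illustrative, not hardware values):

#include <stdint.h>
#include <stdio.h>

static uint32_t smc_sram[64];     /* simulated SMC SRAM, in dwords        */
static uint32_t smc_ind_index;    /* simulated SMC_IND_INDEX_0 register   */

static int set_smc_sram_address(uint32_t smc_addr, uint32_t limit)
{
	if (smc_addr & 3)                /* must be 4-byte aligned            */
		return -1;
	if (smc_addr + 3 >= limit)       /* must stay inside the SMC RAM area */
		return -1;
	smc_ind_index = smc_addr;
	return 0;
}

static void smc_ind_data_write(uint32_t v) { smc_sram[smc_ind_index / 4] = v; }
static uint32_t smc_ind_data_read(void)    { return smc_sram[smc_ind_index / 4]; }

int main(void)
{
	if (!set_smc_sram_address(0x10, sizeof(smc_sram)))
		smc_ind_data_write(0xDEADBEEF);
	if (!set_smc_sram_address(0x10, sizeof(smc_sram)))
		printf("read back 0x%08X\n", smc_ind_data_read());
	return 0;
}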
/**
* Copy bytes from an array into the SMC RAM space.
*
* @param smumgr the address of the powerplay SMU manager.
* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
* @param src the byte array to copy the bytes from.
* @param byteCount the number of bytes to copy.
*/
int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
uint32_t smcStartAddress, const uint8_t *src,
uint32_t byteCount, uint32_t limit)
{
int result;
uint32_t data, originalData;
uint32_t addr, extraShift;
PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
"SMC address must be 4 byte aligned.", return -EINVAL;);
PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
"SMC address is beyond the SMC RAM area.", return -EINVAL;);
addr = smcStartAddress;
while (byteCount >= 4) {
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
result = fiji_set_smc_sram_address(smumgr, addr, limit);
if (result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byteCount -= 4;
addr += 4;
}
if (byteCount) {
/* Now write the odd bytes left.
* Do a read modify write cycle.
*/
data = 0;
result = fiji_set_smc_sram_address(smumgr, addr, limit);
if (result)
return result;
originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
extraShift = 8 * (4 - byteCount);
while (byteCount > 0) {
/* Bytes are written into the SMC address
* space with the MSB first.
*/
data = (0x100 * data) + *src++;
byteCount--;
}
data <<= extraShift;
data |= (originalData & ~((~0UL) << extraShift));
result = fiji_set_smc_sram_address(smumgr, addr, limit);
if (result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
}
return 0;
}
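The loop above packs source bytes into SMC words MSB-first, and a 1-3 byte tail is merged into the existing word with a read-modify-write so the untouched low bytes survive. A standalone, host-side illustration of that arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t word[4] = { 0x01, 0x02, 0x03, 0x04 };
	const uint8_t tail[2] = { 0xAA, 0xBB };          /* 2 leftover bytes     */
	uint32_t original = 0x11223344;                  /* word already in SRAM */
	uint32_t extra_shift = 8 * (4 - 2);              /* 16 for a 2-byte tail */
	uint32_t data = 0;

	/* Whole words: same as src[0] * 0x1000000 + ... in the code above. */
	uint32_t packed = ((uint32_t)word[0] << 24) | ((uint32_t)word[1] << 16) |
			  ((uint32_t)word[2] << 8)  |  (uint32_t)word[3];

	/* Tail: pack MSB-first, shift into the high bytes, keep the old low bytes. */
	for (int i = 0; i < 2; i++)
		data = (data << 8) + tail[i];            /* 0x0000AABB */
	data <<= extra_shift;                            /* 0xAABB0000 */
	data |= original & ~((~0UL) << extra_shift);     /* 0xAABB3344 */

	printf("packed word 0x%08X, merged tail word 0x%08X\n", packed, data);
	return 0;
}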
int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
{
static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
return 0;
}
/**
* Return if the SMC is currently running.
*
* @param smumgr the address of the powerplay hardware manager.
*/
bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
{
return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
&& (0x20100 <= cgs_read_ind_register(smumgr->device,
CGS_IND_REG__SMC, ixSMC_PC_C)));
}
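The check above treats the SMC as running only when its clock is not gated (ck_disable == 0) and its program counter has advanced past 0x20100. A tiny standalone restatement of that predicate with illustrative sample values:

#include <stdint.h>
#include <stdio.h>

static int smc_ram_running(uint32_t ck_disable, uint32_t pc)
{
	return ck_disable == 0 && pc >= 0x20100;
}

int main(void)
{
	printf("clock gated:   %d\n", smc_ram_running(1, 0x20200));
	printf("still in boot: %d\n", smc_ram_running(0, 0x00100));
	printf("running:       %d\n", smc_ram_running(0, 0x20200));
	return 0;
}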
/**
* Send a message to the SMC, and wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return The response that came from the SMC.
*/
int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
if (!fiji_is_smc_ram_running(smumgr))
return -1;
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
printk(KERN_ERR "Failed to send Previous Message.");
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
}
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
return 0;
}
/**
* Send a message to the SMC with parameter
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint16_t msg, uint32_t parameter)
{
if (!fiji_is_smc_ram_running(smumgr))
return -1;
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
printk(KERN_ERR "Failed to send Previous Message.");
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
}
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
return 0;
}
/**
* Send a message to the SMC with parameter, do not wait for response
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int fiji_send_msg_to_smc_with_parameter_without_waiting(
struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
printk(KERN_ERR "Failed to send Previous Message.");
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
}
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
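All three send_msg variants above share the same handshake: wait for SMC_RESP_0 to be non-zero (previous message done), write the argument to SMC_MSG_ARG_0, write the message ID to SMC_MESSAGE_0, and (except for the _without_waiting variant) wait for SMC_RESP_0 again. A standalone simulation of that sequence; the "registers" and the fake_smc_process() responder stand in for the hardware, and the message ID and argument are illustrative, not real PPSMC values:

#include <stdint.h>
#include <stdio.h>

static uint32_t smc_resp = 1;     /* simulated SMC_RESP_0 (non-zero = done) */
static uint32_t smc_msg_arg;      /* simulated SMC_MSG_ARG_0                */
static uint32_t smc_message;      /* simulated SMC_MESSAGE_0                */

static void fake_smc_process(void)
{
	if (smc_message)          /* pretend the firmware handled the message */
		smc_resp = 1;
}

static int send_msg_with_parameter(uint16_t msg, uint32_t parameter)
{
	while (smc_resp == 0)     /* wait for any previous message to finish   */
		;
	smc_msg_arg = parameter;  /* argument must be in place before the msg  */
	smc_resp = 0;
	smc_message = msg;        /* writing the message register kicks the SMC */
	fake_smc_process();
	while (smc_resp == 0)     /* wait for this message's response          */
		;
	return 0;
}

int main(void)
{
	if (!send_msg_with_parameter(0x29, 9))
		printf("message acknowledged, arg=0x%X\n", smc_msg_arg);
	return 0;
}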
/**
* Uploads the SMU firmware from .hex file
*
* @param smumgr the address of the powerplay SMU manager.
* @return 0 on success, -EINVAL on failure.
*/
static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
{
const uint8_t *src;
uint32_t byte_count;
uint32_t *data;
struct cgs_firmware_info info = {0};
cgs_get_firmware_info(smumgr->device,
fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) {
printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
return -EINVAL;
}
if (info.image_size > FIJI_SMC_SIZE) {
printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
byte_count = info.image_size;
src = (const uint8_t *)info.kptr;
data = (uint32_t *)src;
for (; byte_count >= 4; data++, byte_count -= 4)
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
return 0;
}
/**
* Read a 32bit value from the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value an output parameter for the data read from the SMC SRAM.
*/
int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit)
{
int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
return 0;
}
/**
* Write a 32bit value to the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
*/
int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
uint32_t value, uint32_t limit)
{
int result;
result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
return 0;
}
static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
{
uint32_t result = 0;
switch (fw_type) {
case UCODE_ID_SDMA0:
result = UCODE_ID_SDMA0_MASK;
break;
case UCODE_ID_SDMA1:
result = UCODE_ID_SDMA1_MASK;
break;
case UCODE_ID_CP_CE:
result = UCODE_ID_CP_CE_MASK;
break;
case UCODE_ID_CP_PFP:
result = UCODE_ID_CP_PFP_MASK;
break;
case UCODE_ID_CP_ME:
result = UCODE_ID_CP_ME_MASK;
break;
case UCODE_ID_CP_MEC_JT1:
result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
break;
case UCODE_ID_CP_MEC_JT2:
result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
break;
case UCODE_ID_RLC_G:
result = UCODE_ID_RLC_G_MASK;
break;
default:
printk(KERN_ERR "UCode type is out of range!");
result = 0;
}
return result;
}
/* Populate one firmware image to the data structure */
static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
uint32_t fw_type, struct SMU_Entry *entry)
{
int result;
struct cgs_firmware_info info = {0};
result = cgs_get_firmware_info(
smumgr->device,
fiji_convert_fw_type_to_cgs(fw_type),
&info);
if (!result) {
entry->version = 0;
entry->id = (uint16_t)fw_type;
entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
if (fw_type == UCODE_ID_RLC_G)
entry->flags = 1;
else
entry->flags = 0;
}
return result;
}
static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
uint32_t fw_to_load;
struct SMU_DRAMData_TOC *toc;
if (priv->soft_regs_start)
cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
priv->soft_regs_start +
offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
0x0);
toc = (struct SMU_DRAMData_TOC *)priv->header;
toc->num_entries = 0;
toc->structure_version = 1;
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
PP_ASSERT_WITH_CODE(
0 == fiji_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n" , return -1 );
fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
priv->header_buffer.mc_addr_high);
fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
priv->header_buffer.mc_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
+ UCODE_ID_CP_CE_MASK
+ UCODE_ID_CP_ME_MASK
+ UCODE_ID_CP_PFP_MASK
+ UCODE_ID_CP_MEC_MASK
+ UCODE_ID_CP_MEC_JT1_MASK
+ UCODE_ID_CP_MEC_JT2_MASK;
if (fiji_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_LoadUcodes, fw_to_load))
printk(KERN_ERR "Fail to Request SMU Load uCode");
return 0;
}
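fw_to_load above is built by adding the UCODE_ID_*_MASK values together; assuming each mask is a distinct single bit (which the '+' chain relies on), the sum is identical to OR-ing the bits, and the same bit set is what later shows up in UcodeLoadStatus. A standalone illustration with placeholder bit assignments, not the real mask definitions:

#include <stdint.h>
#include <stdio.h>

#define FAKE_RLC_G_MASK  (1u << 0)   /* placeholder bit assignments */
#define FAKE_SDMA0_MASK  (1u << 1)
#define FAKE_CP_CE_MASK  (1u << 2)

int main(void)
{
	uint32_t by_sum = FAKE_RLC_G_MASK + FAKE_SDMA0_MASK + FAKE_CP_CE_MASK;
	uint32_t by_or  = FAKE_RLC_G_MASK | FAKE_SDMA0_MASK | FAKE_CP_CE_MASK;
	uint32_t status = by_or;                 /* pretend the SMC set all bits */

	printf("sum 0x%X, or 0x%X -> %s\n", by_sum, by_or,
	       by_sum == by_or ? "equal for disjoint bits" : "different");
	printf("all requested firmwares loaded: %s\n",
	       (status & by_or) == by_or ? "yes" : "no");
	return 0;
}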
/* Check if the FW has been loaded, SMU will not return
* if loading has not finished.
*/
static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
uint32_t fw_type)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
priv->soft_regs_start +
offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
mask, mask)) {
printk(KERN_ERR "check firmware loading failed\n");
return -EINVAL;
}
return 0;
}
static int fiji_reload_firmware(struct pp_smumgr *smumgr)
{
return smumgr->smumgr_funcs->start_smu(smumgr);
}
static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
{
uint32_t value;
value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
/* driver reads on SR-IOV enabled PF: 0x80000000
* driver reads on SR-IOV enabled VF: 0x80000001
* driver reads on SR-IOV disabled: 0x00000000
*/
return true;
}
return false;
}
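A standalone decoder for the three BIF_IOV_FUNC_IDENTIFIER readings quoted in the comment above (0x00000000 = SR-IOV disabled, 0x80000000 = enabled PF, 0x80000001 = enabled VF). The two mask names are placeholders standing in for the real BIF_IOV_FUNC_IDENTIFIER__* definitions:

#include <stdint.h>
#include <stdio.h>

#define IOV_ENABLE_MASK  0x80000000u   /* placeholder for ..__IOV_ENABLE_MASK */
#define FUNC_IS_VF_MASK  0x00000001u   /* placeholder VF indicator bit        */

static const char *describe(uint32_t id)
{
	if (!(id & IOV_ENABLE_MASK))
		return "SR-IOV disabled";
	return (id & FUNC_IS_VF_MASK) ? "SR-IOV VF" : "SR-IOV PF";
}

int main(void)
{
	uint32_t samples[] = { 0x00000000, 0x80000000, 0x80000001 };
	for (unsigned i = 0; i < 3; i++)
		printf("0x%08X -> %s\n", samples[i], describe(samples[i]));
	return 0;
}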
static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
{
if (fiji_is_hw_virtualization_enabled(smumgr)) {
uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
PPSMC_MSG_LoadUcodes, masks))
printk(KERN_ERR "Fail to Request SMU Load uCode");
}
/* For non-virtualization cases,
* SMU loads all FWs at once in fiji_request_smu_load_fw.
*/
return 0;
}
static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
{ {
int result = 0; int result = 0;
@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1); SMC_SYSCON_RESET_CNTL, rst_reg, 1);
result = fiji_upload_smu_firmware_image(smumgr); result = smu7_upload_smu_firmware_image(smumgr);
if (result) if (result)
return result; return result;
@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMU_STATUS, SMU_DONE, 0); SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */ /* Check pass/failed indicator */
if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS)) { SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false, PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1); "SMU Firmware start failed!", return -1);
} }
@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1); SMC_SYSCON_RESET_CNTL, rst_reg, 1);
result = fiji_upload_smu_firmware_image(smumgr); result = smu7_upload_smu_firmware_image(smumgr);
if (result) if (result)
return result; return result;
/* Set smc instruction start point at 0x0 */ /* Set smc instruction start point at 0x0 */
fiji_program_jump_on_start(smumgr); smu7_program_jump_on_start(smumgr);
/* Enable clock */ /* Enable clock */
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED; priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
if (priv->avfs.AvfsBtcParam) { if (priv->avfs.AvfsBtcParam) {
if (!fiji_send_msg_to_smc_with_parameter(smumgr, if (!smum_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) { PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) { if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED; priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
result = 0; result = 0;
} else { } else {
printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
" to Enable AVFS Failed!"); " to Enable AVFS Failed!");
fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
result = -1; result = -1;
} }
} else { } else {
@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */ charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */ inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
PmFuseTable), &table_start, 0x40000), PmFuseTable), &table_start, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate " "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
inversion_voltage_addr = table_start + inversion_voltage_addr = table_start +
offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage); offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr, result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
(uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000); (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
PP_ASSERT_WITH_CODE(0 == result, PP_ASSERT_WITH_CODE(0 == result,
"[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not " "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
"be populated.", return -1;); "be populated.", return -1;);
result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr, result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
(uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000); (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] " PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
"charz_freq could not be populated.", return -1;); "charz_freq could not be populated.", return -1;);
@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
uint32_t level_addr, vr_config_addr; uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level); uint32_t level_size = sizeof(avfs_graphics_level);
PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr, PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable), offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000), &table_start, 0x40000),
@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start + vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig); offsetof(SMU73_Discrete_DpmTable, VRConfig);
PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr, PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000), (uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC", "vr_config value over to SMC",
@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr, PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000), (uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;); return -1;);
@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
break; break;
case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/ case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
PPSMC_MSG_VftTableIsValid), 0x666),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond " "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to VftTableIsValid Msg", "correctly to VftTableIsValid Msg",
return -1;); return -1;);
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR; priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr, PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
PPSMC_MSG_EnableAvfs), PPSMC_MSG_EnableAvfs),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond " "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to EnableAvfs Message Msg", "correctly to EnableAvfs Message Msg",
@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
/* Only start SMC if SMC RAM is not running */ /* Only start SMC if SMC RAM is not running */
if (!fiji_is_smc_ram_running(smumgr)) { if (!smu7_is_smc_ram_running(smumgr)) {
fiji_avfs_event_mgr(smumgr, false); fiji_avfs_event_mgr(smumgr, false);
/* Check if SMU is running in protected mode */ /* Check if SMU is running in protected mode */
@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
/* Setup SoftRegsStart here for register lookup in case /* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed * DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/ */
fiji_read_smc_sram_dword(smumgr, smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters), offsetof(SMU73_Firmware_Header, SoftRegisters),
&(priv->soft_regs_start), 0x40000); &(priv->smu7_data.soft_regs_start), 0x40000);
result = fiji_request_smu_load_fw(smumgr); result = smu7_request_smu_load_fw(smumgr);
return result; return result;
} }
@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
static int fiji_smu_init(struct pp_smumgr *smumgr) static int fiji_smu_init(struct pp_smumgr *smumgr)
{ {
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
uint64_t mc_addr; int i;
priv->header_buffer.data_size = if (smu7_init(smumgr))
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; return -EINVAL;
smu_allocate_memory(smumgr->device,
priv->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&priv->header_buffer.kaddr,
&priv->header_buffer.handle);
priv->header = priv->header_buffer.kaddr;
priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
PP_ASSERT_WITH_CODE((NULL != priv->header),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->header_buffer.handle);
return -1);
priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
if (fiji_is_hw_avfs_present(smumgr)) if (fiji_is_hw_avfs_present(smumgr))
@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
else else
priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
priv->acpi_optimization = 1; for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
priv->activity_target[i] = 30;
return 0; return 0;
} }
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
static const struct pp_smumgr_func fiji_smu_funcs = { static const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init, .smu_init = &fiji_smu_init,
.smu_fini = &fiji_smu_fini, .smu_fini = &smu7_smu_fini,
.start_smu = &fiji_start_smu, .start_smu = &fiji_start_smu,
.check_fw_load_finish = &fiji_check_fw_load_finish, .check_fw_load_finish = &smu7_check_fw_load_finish,
.request_smu_load_fw = &fiji_reload_firmware, .request_smu_load_fw = &smu7_reload_firmware,
.request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load, .request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &fiji_send_msg_to_smc, .send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL, .download_pptable_settings = NULL,
.upload_pptable_settings = NULL, .upload_pptable_settings = NULL,
.update_smc_table = fiji_update_smc_table,
.get_offsetof = fiji_get_offsetof,
.process_firmware_header = fiji_process_firmware_header,
.init_smc_table = fiji_init_smc_table,
.update_sclk_threshold = fiji_update_sclk_threshold,
.thermal_setup_fan_table = fiji_thermal_setup_fan_table,
.populate_all_graphic_levels = fiji_populate_all_graphic_levels,
.populate_all_memory_levels = fiji_populate_all_memory_levels,
.get_mac_definition = fiji_get_mac_definition,
.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
.is_dpm_running = fiji_is_dpm_running,
}; };
int fiji_smum_init(struct pp_smumgr *smumgr) int fiji_smum_init(struct pp_smumgr *smumgr)


@ -23,37 +23,31 @@
#ifndef _FIJI_SMUMANAGER_H_ #ifndef _FIJI_SMUMANAGER_H_
#define _FIJI_SMUMANAGER_H_ #define _FIJI_SMUMANAGER_H_
#include "smu73_discrete.h"
#include <pp_endian.h>
#include "smu7_smumgr.h"
struct fiji_smu_avfs { struct fiji_smu_avfs {
enum AVFS_BTC_STATUS AvfsBtcStatus; enum AVFS_BTC_STATUS AvfsBtcStatus;
uint32_t AvfsBtcParam; uint32_t AvfsBtcParam;
}; };
struct fiji_buffer_entry {
uint32_t data_size;
uint32_t mc_addr_low;
uint32_t mc_addr_high;
void *kaddr;
unsigned long handle;
};
struct fiji_smumgr { struct fiji_smumgr {
uint8_t *header; struct smu7_smumgr smu7_data;
uint8_t *mec_image;
uint32_t soft_regs_start; struct fiji_smu_avfs avfs;
struct fiji_smu_avfs avfs; struct SMU73_Discrete_DpmTable smc_state_table;
uint32_t acpi_optimization; struct SMU73_Discrete_Ulv ulv_setting;
struct SMU73_Discrete_PmFuses power_tune_table;
const struct fiji_pt_defaults *power_tune_defaults;
uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
struct fiji_buffer_entry header_buffer;
}; };
int fiji_smum_init(struct pp_smumgr *smumgr);
int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
uint32_t *value, uint32_t limit);
int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
const uint8_t *src, uint32_t byteCount, uint32_t limit);
#endif #endif

File diff suppressed because it is too large


@ -20,17 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#ifndef _ICELAND_SMC_H
#define _ICELAND_SMC_H
#ifndef _TONGA_CLOCK_POWER_GATING_H_ #include "smumgr.h"
#define _TONGA_CLOCK_POWER_GATING_H_
#include "tonga_hwmgr.h"
#include "pp_asicblocks.h"
extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
#endif /* _TONGA_CLOCK_POWER_GATING_H_ */ uint32_t iceland_get_mac_definition(uint32_t value);
int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
#endif


@ -35,120 +35,10 @@
#include "smu/smu_7_1_1_d.h" #include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h" #include "smu/smu_7_1_1_sh_mask.h"
#include "cgs_common.h" #include "cgs_common.h"
#include "iceland_smc.h"
#define ICELAND_SMC_SIZE 0x20000 #define ICELAND_SMC_SIZE 0x20000
#define BUFFER_SIZE 80000
#define MAX_STRING_SIZE 15
#define BUFFER_SIZETWO 131072 /*128 *1024*/
/**
* Set the address for reading/writing the SMC SRAM space.
* @param smumgr the address of the powerplay hardware manager.
* @param smcAddress the address in the SMC RAM to access.
*/
static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr,
uint32_t smcAddress, uint32_t limit)
{
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
"SMC address must be 4 byte aligned.",
return -1;);
PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
"SMC address is beyond the SMC RAM area.",
return -1;);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
return 0;
}
/**
* Copy bytes from an array into the SMC RAM space.
*
* @param smumgr the address of the powerplay SMU manager.
* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
* @param src the byte array to copy the bytes from.
* @param byteCount the number of bytes to copy.
*/
int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
uint32_t smcStartAddress, const uint8_t *src,
uint32_t byteCount, uint32_t limit)
{
uint32_t addr;
uint32_t data, orig_data;
int result = 0;
uint32_t extra_shift;
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
"SMC address must be 4 byte aligned.",
return 0;);
PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
"SMC address is beyond the SMC RAM area.",
return 0;);
addr = smcStartAddress;
while (byteCount >= 4) {
/*
* Bytes are written into the
* SMC address space with the MSB first
*/
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
result = iceland_set_smc_sram_address(smumgr, addr, limit);
if (result)
goto out;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byteCount -= 4;
addr += 4;
}
if (0 != byteCount) {
/* Now write odd bytes left, do a read modify write cycle */
data = 0;
result = iceland_set_smc_sram_address(smumgr, addr, limit);
if (result)
goto out;
orig_data = cgs_read_register(smumgr->device,
mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byteCount);
while (byteCount > 0) {
data = (data << 8) + *src++;
byteCount--;
}
data <<= extra_shift;
data |= (orig_data & ~((~0UL) << extra_shift));
result = iceland_set_smc_sram_address(smumgr, addr, limit);
if (result)
goto out;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
}
out:
return result;
}
/**
* Deassert the reset 'pin' (set it to high).
*
* @param smumgr the address of the powerplay hardware manager.
*/
static int iceland_start_smc(struct pp_smumgr *smumgr) static int iceland_start_smc(struct pp_smumgr *smumgr)
{ {
SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@ -157,284 +47,15 @@ static int iceland_start_smc(struct pp_smumgr *smumgr)
return 0; return 0;
} }
static void iceland_pp_reset_smc(struct pp_smumgr *smumgr) static void iceland_reset_smc(struct pp_smumgr *smumgr)
{ {
SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, SMC_SYSCON_RESET_CNTL,
rst_reg, 1); rst_reg, 1);
} }
int iceland_program_jump_on_start(struct pp_smumgr *smumgr)
{
static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
return 0;
}
/**
* Return if the SMC is currently running.
*
* @param smumgr the address of the powerplay hardware manager.
*/
bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr)
{
uint32_t val1, val2;
val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
ixSMC_PC_C);
return ((0 == val1) && (0x20100 <= val2));
}
/**
* Send a message to the SMC, and wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return The response that came from the SMC.
*/
static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
if (!iceland_is_smc_ram_running(smumgr))
return -EINVAL;
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
PP_ASSERT_WITH_CODE(
1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
"Failed to send Previous Message.",
);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
PP_ASSERT_WITH_CODE(
1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
"Failed to send Message.",
);
return 0;
}
/**
* Send a message to the SMC with parameter
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint16_t msg, uint32_t parameter)
{
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
return iceland_send_msg_to_smc(smumgr, msg);
}
/*
* Read a 32bit value from the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smcAddress the address in the SMC RAM to access.
* @param value and output parameter for the data read from the SMC SRAM.
*/
int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
uint32_t smcAddress, uint32_t *value,
uint32_t limit)
{
int result;
result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
if (0 != result)
return result;
*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
return 0;
}
/*
* Write a 32bit value to the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smcAddress the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
*/
int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr,
uint32_t smcAddress, uint32_t value,
uint32_t limit)
{
int result;
result = iceland_set_smc_sram_address(smumgr, smcAddress, limit);
if (0 != result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
return 0;
}
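These dword helpers are typically used to pull individual fields out of the firmware header that the SMC keeps in SRAM, by combining a header base address with offsetof(). A standalone sketch of that pattern; the header layout, base offset and values below are invented for illustration (the driver uses SMU7_FIRMWARE_HEADER_LOCATION and the real SMU71/SMU73 header structs):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_fw_header {                /* stand-in for an SMU firmware header */
	uint32_t Version;
	uint32_t SoftRegisters;        /* SRAM offset of the soft-register table */
	uint32_t DpmTable;
};

static uint8_t fake_sram[0x100];       /* simulated SMC SRAM                   */
#define FAKE_HEADER_LOCATION 0x20      /* stand-in for SMU7_FIRMWARE_HEADER_LOCATION */

static uint32_t read_sram_dword(uint32_t addr)
{
	uint32_t v;
	memcpy(&v, &fake_sram[addr], sizeof(v));
	return v;
}

int main(void)
{
	struct fake_fw_header hdr = { 1, 0xE08, 0xE80 };
	memcpy(&fake_sram[FAKE_HEADER_LOCATION], &hdr, sizeof(hdr));

	uint32_t soft_regs_start = read_sram_dword(FAKE_HEADER_LOCATION +
			offsetof(struct fake_fw_header, SoftRegisters));
	printf("SoftRegisters table starts at SRAM offset 0x%X\n", soft_regs_start);
	return 0;
}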
static int iceland_smu_fini(struct pp_smumgr *smumgr)
{
struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend);
smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type)
{
enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
switch (fw_type) {
case UCODE_ID_SMU:
result = CGS_UCODE_ID_SMU;
break;
case UCODE_ID_SDMA0:
result = CGS_UCODE_ID_SDMA0;
break;
case UCODE_ID_SDMA1:
result = CGS_UCODE_ID_SDMA1;
break;
case UCODE_ID_CP_CE:
result = CGS_UCODE_ID_CP_CE;
break;
case UCODE_ID_CP_PFP:
result = CGS_UCODE_ID_CP_PFP;
break;
case UCODE_ID_CP_ME:
result = CGS_UCODE_ID_CP_ME;
break;
case UCODE_ID_CP_MEC:
result = CGS_UCODE_ID_CP_MEC;
break;
case UCODE_ID_CP_MEC_JT1:
result = CGS_UCODE_ID_CP_MEC_JT1;
break;
case UCODE_ID_CP_MEC_JT2:
result = CGS_UCODE_ID_CP_MEC_JT2;
break;
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
default:
break;
}
return result;
}
/**
* Convert the PPIRI firmware type to SMU type mask.
* For MEC, we need to check all MEC related type
*/
static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType)
{
uint16_t result = 0;
switch (firmwareType) {
case UCODE_ID_SDMA0:
result = UCODE_ID_SDMA0_MASK;
break;
case UCODE_ID_SDMA1:
result = UCODE_ID_SDMA1_MASK;
break;
case UCODE_ID_CP_CE:
result = UCODE_ID_CP_CE_MASK;
break;
case UCODE_ID_CP_PFP:
result = UCODE_ID_CP_PFP_MASK;
break;
case UCODE_ID_CP_ME:
result = UCODE_ID_CP_ME_MASK;
break;
case UCODE_ID_CP_MEC:
case UCODE_ID_CP_MEC_JT1:
case UCODE_ID_CP_MEC_JT2:
result = UCODE_ID_CP_MEC_MASK;
break;
case UCODE_ID_RLC_G:
result = UCODE_ID_RLC_G_MASK;
break;
default:
break;
}
return result;
}
/**
* Check if the FW has been loaded,
* SMU will not return if loading has not finished.
*/
static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
{
uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType);
if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) {
pr_err("[ powerplay ] check firmware loading failed\n");
return -EINVAL;
}
return 0;
}
/* Populate one firmware image to the data structure */
static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr,
uint16_t firmware_type,
struct SMU_Entry *pentry)
{
int result;
struct cgs_firmware_info info = {0};
result = cgs_get_firmware_info(
smumgr->device,
iceland_convert_fw_type_to_cgs(firmware_type),
&info);
if (result == 0) {
pentry->version = 0;
pentry->id = (uint16_t)firmware_type;
pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
pentry->meta_data_addr_high = 0;
pentry->meta_data_addr_low = 0;
pentry->data_size_byte = info.image_size;
pentry->num_register_entries = 0;
if (firmware_type == UCODE_ID_RLC_G)
pentry->flags = 1;
else
pentry->flags = 0;
} else {
return result;
}
return result;
}
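Each TOC entry populated above mainly records where a firmware image sits in GPU (MC) memory, split into 32-bit halves, plus its size. A standalone sketch using simplified stand-ins for SMU_Entry and the smu_upper_32_bits()/smu_lower_32_bits() helpers; the address, id and size are illustrative:

#include <stdint.h>
#include <stdio.h>

struct fake_toc_entry {                /* simplified stand-in for SMU_Entry */
	uint16_t id;
	uint32_t image_addr_high;
	uint32_t image_addr_low;
	uint32_t data_size_byte;
};

static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)(v & 0xffffffffu); }

int main(void)
{
	uint64_t mc_addr = 0x0000000F40000000ull;  /* illustrative GPU address   */
	struct fake_toc_entry e = {
		.id = 42,                          /* illustrative firmware id   */
		.image_addr_high = upper_32_bits(mc_addr),
		.image_addr_low  = lower_32_bits(mc_addr),
		.data_size_byte  = 0x8000,
	};
	printf("fw %u at 0x%08X%08X, %u bytes\n", (unsigned)e.id,
	       e.image_addr_high, e.image_addr_low, e.data_size_byte);
	return 0;
}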
static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr)
{ {
SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, SMC_SYSCON_CLOCK_CNTL_0,
@ -448,10 +69,10 @@ static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
ck_disable, 0); ck_disable, 0);
} }
int iceland_smu_start_smc(struct pp_smumgr *smumgr) static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
{ {
/* set smc instruction start point at 0x0 */ /* set smc instruction start point at 0x0 */
iceland_program_jump_on_start(smumgr); smu7_program_jump_on_start(smumgr);
/* enable smc clock */ /* enable smc clock */
iceland_start_smc_clock(smumgr); iceland_start_smc_clock(smumgr);
@ -465,17 +86,37 @@ int iceland_smu_start_smc(struct pp_smumgr *smumgr)
return 0; return 0;
} }
/**
* Upload the SMC firmware to the SMC microcontroller. static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
* uint32_t length, const uint8_t *src,
* @param smumgr the address of the powerplay hardware manager. uint32_t limit, uint32_t start_addr)
* @param pFirmware the data structure containing the various sections of the firmware.
*/
int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
{ {
const uint8_t *src; uint32_t byte_count = length;
uint32_t byte_count, val;
uint32_t data; uint32_t data;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
while (byte_count >= 4) {
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
{
uint32_t val;
struct cgs_firmware_info info = {0}; struct cgs_firmware_info info = {0};
if (smumgr == NULL || smumgr->device == NULL) if (smumgr == NULL || smumgr->device == NULL)
@ -483,7 +124,7 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
/* load SMC firmware */ /* load SMC firmware */
cgs_get_firmware_info(smumgr->device, cgs_get_firmware_info(smumgr->device,
iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) { if (info.image_size & 3) {
pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n");
@ -506,122 +147,17 @@ int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
ixSMC_SYSCON_MISC_CNTL, val | 1); ixSMC_SYSCON_MISC_CNTL, val | 1);
/* stop smc clock */ /* stop smc clock */
iceland_pp_stop_smc_clock(smumgr); iceland_stop_smc_clock(smumgr);
/* reset smc */ /* reset smc */
iceland_pp_reset_smc(smumgr); iceland_reset_smc(smumgr);
iceland_upload_smc_firmware_data(smumgr, info.image_size,
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, (uint8_t *)info.kptr, ICELAND_SMC_SIZE,
info.ucode_start_address); info.ucode_start_address);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
AUTO_INCREMENT_IND_0, 1);
byte_count = info.image_size;
src = (const uint8_t *)info.kptr;
while (byte_count >= 4) {
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL,
AUTO_INCREMENT_IND_0, 0);
return 0; return 0;
} }
static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr)
{
struct iceland_smumgr *iceland_smu =
(struct iceland_smumgr *)(smumgr->backend);
uint16_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader;
toc->num_entries = 0;
toc->structure_version = 1;
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry(smumgr,
UCODE_ID_RLC_G,
&toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n",
return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_CE,
&toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n",
return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
PP_ASSERT_WITH_CODE(
0 == iceland_populate_single_firmware_entry
(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.\n", return -1);
if (!iceland_is_smc_ram_running(smumgr)) {
result = iceland_smu_upload_firmware_image(smumgr);
if (result)
return result;
result = iceland_smu_start_smc(smumgr);
if (result)
return result;
}
iceland_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_DRV_DRAM_ADDR_HI,
iceland_smu->header_buffer.mc_addr_high);
iceland_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_DRV_DRAM_ADDR_LO,
iceland_smu->header_buffer.mc_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
+ UCODE_ID_CP_CE_MASK
+ UCODE_ID_CP_ME_MASK
+ UCODE_ID_CP_PFP_MASK
+ UCODE_ID_CP_MEC_MASK
+ UCODE_ID_CP_MEC_JT1_MASK
+ UCODE_ID_CP_MEC_JT2_MASK;
PP_ASSERT_WITH_CODE(
0 == iceland_send_msg_to_smc_with_parameter(
smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
"Fail to Request SMU Load uCode", return 0);
return result;
}
static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
uint32_t firmwareType) uint32_t firmwareType)
{ {
@ -635,12 +171,22 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
result = iceland_smu_upload_firmware_image(smumgr); result = iceland_smu_upload_firmware_image(smumgr);
if (result) if (result)
return result; return result;
result = iceland_smu_start_smc(smumgr); result = iceland_smu_start_smc(smumgr);
if (result) if (result)
return result; return result;
result = iceland_request_smu_reload_fw(smumgr); if (!smu7_is_smc_ram_running(smumgr)) {
printk("smu not running, upload firmware again \n");
result = iceland_smu_upload_firmware_image(smumgr);
if (result)
return result;
result = iceland_smu_start_smc(smumgr);
if (result)
return result;
}
result = smu7_request_smu_load_fw(smumgr);
return result; return result;
} }
@ -654,47 +200,38 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
*/ */
static int iceland_smu_init(struct pp_smumgr *smumgr) static int iceland_smu_init(struct pp_smumgr *smumgr)
{ {
struct iceland_smumgr *iceland_smu; int i;
uint64_t mc_addr = 0; struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
if (smu7_init(smumgr))
return -EINVAL;
/* Allocate memory for backend private data */ for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
iceland_smu = (struct iceland_smumgr *)(smumgr->backend); smu_data->activity_target[i] = 30;
iceland_smu->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
smu_allocate_memory(smumgr->device,
iceland_smu->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&iceland_smu->header_buffer.kaddr,
&iceland_smu->header_buffer.handle);
iceland_smu->pHeader = iceland_smu->header_buffer.kaddr;
iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)iceland_smu->header_buffer.handle);
return -1);
return 0; return 0;
} }
static const struct pp_smumgr_func iceland_smu_funcs = { static const struct pp_smumgr_func iceland_smu_funcs = {
.smu_init = &iceland_smu_init, .smu_init = &iceland_smu_init,
.smu_fini = &iceland_smu_fini, .smu_fini = &smu7_smu_fini,
.start_smu = &iceland_start_smu, .start_smu = &iceland_start_smu,
.check_fw_load_finish = &iceland_check_fw_load_finish, .check_fw_load_finish = &smu7_check_fw_load_finish,
.request_smu_load_fw = &iceland_request_smu_reload_fw, .request_smu_load_fw = &smu7_reload_firmware,
.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
.send_msg_to_smc = &iceland_send_msg_to_smc, .send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL, .download_pptable_settings = NULL,
.upload_pptable_settings = NULL, .upload_pptable_settings = NULL,
.get_offsetof = iceland_get_offsetof,
.process_firmware_header = iceland_process_firmware_header,
.init_smc_table = iceland_init_smc_table,
.update_sclk_threshold = iceland_update_sclk_threshold,
.thermal_setup_fan_table = iceland_thermal_setup_fan_table,
.populate_all_graphic_levels = iceland_populate_all_graphic_levels,
.populate_all_memory_levels = iceland_populate_all_memory_levels,
.get_mac_definition = iceland_get_mac_definition,
.initialize_mc_reg_table = iceland_initialize_mc_reg_table,
.is_dpm_running = iceland_is_dpm_running,
}; };
int iceland_smum_init(struct pp_smumgr *smumgr) int iceland_smum_init(struct pp_smumgr *smumgr)


@ -26,39 +26,46 @@
#ifndef _ICELAND_SMUMGR_H_ #ifndef _ICELAND_SMUMGR_H_
#define _ICELAND_SMUMGR_H_ #define _ICELAND_SMUMGR_H_
struct iceland_buffer_entry {
uint32_t data_size; #include "smu7_smumgr.h"
uint32_t mc_addr_low; #include "pp_endian.h"
uint32_t mc_addr_high; #include "smu71_discrete.h"
void *kaddr;
unsigned long handle; struct iceland_pt_defaults {
uint8_t svi_load_line_en;
uint8_t svi_load_line_vddc;
uint8_t tdc_vddc_throttle_release_limit_perc;
uint8_t tdc_mawt;
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
uint32_t bamp_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
struct iceland_mc_reg_entry {
uint32_t mclk_max;
uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
struct iceland_mc_reg_table {
uint8_t last; /* number of registers*/
uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
}; };
/* Iceland only has header_buffer, don't have smu buffer. */
struct iceland_smumgr { struct iceland_smumgr {
uint8_t *pHeader; struct smu7_smumgr smu7_data;
uint8_t *pMecImage; struct SMU71_Discrete_DpmTable smc_state_table;
uint32_t ulSoftRegsStart; struct SMU71_Discrete_PmFuses power_tune_table;
struct SMU71_Discrete_Ulv ulv_setting;
struct iceland_buffer_entry header_buffer; struct iceland_pt_defaults *power_tune_defaults;
SMU71_Discrete_MCRegisters mc_regs;
struct iceland_mc_reg_table mc_reg_table;
uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
}; };
extern int iceland_smum_init(struct pp_smumgr *smumgr);
extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr,
uint32_t smcStartAddress,
const uint8_t *src,
uint32_t byteCount, uint32_t limit);
extern int iceland_smu_start_smc(struct pp_smumgr *smumgr);
extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr,
uint32_t smcAddress,
uint32_t *value, uint32_t limit);
extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr,
uint32_t smcAddress,
uint32_t value, uint32_t limit);
extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr);
extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr);
#endif #endif

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/* /*
* Copyright 2014 Advanced Micro Devices, Inc. * Copyright 2015 Advanced Micro Devices, Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
@ -20,23 +20,23 @@
* OTHER DEALINGS IN THE SOFTWARE. * OTHER DEALINGS IN THE SOFTWARE.
* *
*/ */
#ifndef POLARIS10_SMC_H
#define POLARIS10_SMC_H
#ifndef TONGA_SMUMGR_H #include "smumgr.h"
#define TONGA_SMUMGR_H
#include "tonga_ppsmc.h"
int tonga_smu_init(struct amdgpu_device *adev); int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
int tonga_smu_fini(struct amdgpu_device *adev); int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
int tonga_smu_start(struct amdgpu_device *adev); int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
struct tonga_smu_private_data int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
{ int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
uint8_t *header; int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
uint32_t smu_buffer_addr_high; uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
uint32_t smu_buffer_addr_low; uint32_t polaris10_get_mac_definition(uint32_t value);
uint32_t header_addr_high; int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
uint32_t header_addr_low; bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
};
#endif #endif


@ -38,15 +38,11 @@
#include "ppatomctrl.h" #include "ppatomctrl.h"
#include "pp_debug.h" #include "pp_debug.h"
#include "cgs_common.h" #include "cgs_common.h"
#include "polaris10_smc.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
#define POLARIS10_SMC_SIZE 0x20000 #define PPPOLARIS10_TARGETACTIVITY_DFLT 50
/* Microcode file is stored in this buffer */
#define BUFFER_SIZE 80000
#define MAX_STRING_SIZE 15
#define BUFFER_SIZETWO 131072 /* 128 *1024 */
#define SMC_RAM_END 0x40000
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
/* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
@ -61,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
{ 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
}; };
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
{0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
/**
* Set the address for reading/writing the SMC SRAM space.
* @param smumgr the address of the powerplay hardware manager.
* @param smcAddress the address in the SMC RAM to access.
*/
static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
return 0;
}
/**
* Copy bytes from SMC RAM space into driver memory.
*
* @param smumgr the address of the powerplay SMU manager.
* @param smc_start_address the start address in the SMC RAM to copy bytes from
* @param src the byte array to copy the bytes to.
* @param byte_count the number of bytes to copy.
*/
int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
uint8_t *dest_byte;
uint8_t i, data_byte[4] = {0};
uint32_t *pdata = (uint32_t *)&data_byte;
PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
addr = smc_start_address;
while (byte_count >= 4) {
polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
dest += 1;
byte_count -= 4;
addr += 4;
}
if (byte_count) {
polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
for (i = 0; i < byte_count; i++)
dest_byte[i] = data_byte[i];
}
return 0;
}
/**
* Copy bytes from an array into the SMC RAM space.
*
* @param pSmuMgr the address of the powerplay SMU manager.
* @param smc_start_address the start address in the SMC RAM to copy bytes to.
* @param src the byte array to copy the bytes from.
* @param byte_count the number of bytes to copy.
*/
int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
uint32_t data = 0;
uint32_t original_data;
uint32_t addr = 0;
uint32_t extra_shift;
PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
addr = smc_start_address;
while (byte_count >= 4) {
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
result = polaris10_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
addr += 4;
}
if (0 != byte_count) {
data = 0;
result = polaris10_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
/* Bytes are written into the SMC address space with the MSB first. */
data = (0x100 * data) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (original_data & ~((~0UL) << extra_shift));
result = polaris10_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
/**
* Return if the SMC is currently running.
*
* @param smumgr the address of the powerplay hardware manager.
*/
bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
{
return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
{
uint32_t efuse;
efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
return false;
}
/**
* Send a message to the SMC, and wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return The response that came from the SMC.
*/
int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
int ret;
if (!polaris10_is_smc_ram_running(smumgr))
return -1;
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
printk("\n failed to send pre message %x ret is %d \n", msg, ret);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
printk("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
}
/**
* Send a message to the SMC, and do not wait for its response.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
{
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
/**
* Send a message to the SMC with parameter
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
if (!polaris10_is_smc_ram_running(smumgr)) {
return -1;
}
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
return polaris10_send_msg_to_smc(smumgr, msg);
}
/**
* Send a message to the SMC with parameter, do not wait for response
*
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
}
int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
{
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
printk("Failed to send Message.\n");
return 0;
}
/**
* Wait until the SMC is doing nothing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
*
* @param smumgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return The response that came from the SMC.
*/
int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
{
/* If the SMC is not even on it qualifies as inactive. */
if (!polaris10_is_smc_ram_running(smumgr))
return -1;
SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
/**
* Upload the SMC firmware to the SMC microcontroller.
*
* @param smumgr the address of the powerplay hardware manager.
* @param pFirmware the data structure containing the various sections of the firmware.
*/
static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -1);
return 0;
}
static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
{
enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
switch (fw_type) {
case UCODE_ID_SMU:
result = CGS_UCODE_ID_SMU;
break;
case UCODE_ID_SMU_SK:
result = CGS_UCODE_ID_SMU_SK;
break;
case UCODE_ID_SDMA0:
result = CGS_UCODE_ID_SDMA0;
break;
case UCODE_ID_SDMA1:
result = CGS_UCODE_ID_SDMA1;
break;
case UCODE_ID_CP_CE:
result = CGS_UCODE_ID_CP_CE;
break;
case UCODE_ID_CP_PFP:
result = CGS_UCODE_ID_CP_PFP;
break;
case UCODE_ID_CP_ME:
result = CGS_UCODE_ID_CP_ME;
break;
case UCODE_ID_CP_MEC:
result = CGS_UCODE_ID_CP_MEC;
break;
case UCODE_ID_CP_MEC_JT1:
result = CGS_UCODE_ID_CP_MEC_JT1;
break;
case UCODE_ID_CP_MEC_JT2:
result = CGS_UCODE_ID_CP_MEC_JT2;
break;
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
default:
break;
}
return result;
}
static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
{
int result = 0;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
cgs_get_firmware_info(smumgr->device,
polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
cgs_get_firmware_info(smumgr->device,
polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
/* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
return result;
}
/**
* Read a 32bit value from the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smcAddress the address in the SMC RAM to access.
* @param value an output parameter for the data read from the SMC SRAM.
*/
int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
return 0;
}
/**
* Write a 32bit value to the SMC SRAM space.
* ALL PARAMETERS ARE IN HOST BYTE ORDER.
* @param smumgr the address of the powerplay hardware manager.
* @param smc_addr the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
*/
int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
int polaris10_smu_fini(struct pp_smumgr *smumgr)
{
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
{
uint32_t result = 0;
switch (fw_type) {
case UCODE_ID_SDMA0:
result = UCODE_ID_SDMA0_MASK;
break;
case UCODE_ID_SDMA1:
result = UCODE_ID_SDMA1_MASK;
break;
case UCODE_ID_CP_CE:
result = UCODE_ID_CP_CE_MASK;
break;
case UCODE_ID_CP_PFP:
result = UCODE_ID_CP_PFP_MASK;
break;
case UCODE_ID_CP_ME:
result = UCODE_ID_CP_ME_MASK;
break;
case UCODE_ID_CP_MEC_JT1:
case UCODE_ID_CP_MEC_JT2:
result = UCODE_ID_CP_MEC_MASK;
break;
case UCODE_ID_RLC_G:
result = UCODE_ID_RLC_G_MASK;
break;
default:
printk("UCode type is out of range! \n");
result = 0;
}
return result;
}
/* Populate one firmware image to the data structure */
static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
result = cgs_get_firmware_info(smumgr->device,
polaris10_convert_fw_type_to_cgs(fw_type),
&info);
if (!result) {
entry->version = info.version;
entry->id = (uint16_t)fw_type;
entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
}
if (fw_type == UCODE_ID_RLC_G)
entry->flags = 1;
else
entry->flags = 0;
return 0;
}
static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
if (!smumgr->reload_fw) {
printk(KERN_INFO "[ powerplay ] skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
0x0);
polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
toc = (struct SMU_DRAMData_TOC *)smu_data->header;
toc->num_entries = 0;
toc->structure_version = 1;
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
+ UCODE_ID_CP_CE_MASK
+ UCODE_ID_CP_ME_MASK
+ UCODE_ID_CP_PFP_MASK
+ UCODE_ID_CP_MEC_MASK;
if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
printk(KERN_ERR "Fail to Request SMU Load uCode");
return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
uint32_t ret;
/* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
return ret;
}
static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
{
return smumgr->smumgr_funcs->start_smu(smumgr);
}
static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
{
@ -668,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
if (0 != smu_data->avfs.avfs_btc_param) {
if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@ -696,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@ -707,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@ -722,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@ -731,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@ -792,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
result = polaris10_upload_smu_firmware_image(smumgr);
result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
@ -811,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
polaris10_send_msg_to_smc_offset(smumgr);
smu7_send_msg_to_smc_offset(smumgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
@ -852,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
result = polaris10_upload_smu_firmware_image(smumgr);
result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
polaris10_program_jump_on_start(smumgr);
smu7_program_jump_on_start(smumgr);
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@ -880,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
if (!polaris10_is_smc_ram_running(smumgr)) {
if (!smu7_is_smc_ram_running(smumgr)) {
SMU_VFT_INTACT = false;
smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
@ -893,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->security_hard_key ^= 1;
smu_data->smu7_data.security_hard_key ^= 1;
result = polaris10_start_smu_in_protection_mode(smumgr);
}
}
@ -905,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
smu_data->post_initial_boot = true;
polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->soft_regs_start), 0x40000);
smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
result = polaris10_request_smu_load_fw(smumgr);
result = smu7_request_smu_load_fw(smumgr);
return result;
}
static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
{
uint32_t efuse;
efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
return false;
}
static int polaris10_smu_init(struct pp_smumgr *smumgr)
{
struct polaris10_smumgr *smu_data;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
uint8_t *internal_buf;
int i;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
smu_data = (struct polaris10_smumgr *)(smumgr->backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
smu_data->smu_buffer.data_size = 200*4096;
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
smu_allocate_memory(smumgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&smu_data->header_buffer.kaddr,
&smu_data->header_buffer.handle);
smu_data->header = smu_data->header_buffer.kaddr;
smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
if (smu7_init(smumgr))
return -EINVAL;
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -1);
/* Allocate buffer for SMU internal buffer and send the address to SMU.
* Iceland SMU does not need internal buffer.*/
smu_allocate_memory(smumgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&smu_data->smu_buffer.kaddr,
&smu_data->smu_buffer.handle);
internal_buf = smu_data->smu_buffer.kaddr;
smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -1;);
if (polaris10_is_hw_avfs_present(smumgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
return 0;
}
static const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
.smu_fini = polaris10_smu_fini,
.smu_fini = smu7_smu_fini,
.start_smu = polaris10_start_smu,
.check_fw_load_finish = polaris10_check_fw_load_finish,
.check_fw_load_finish = smu7_check_fw_load_finish,
.request_smu_load_fw = polaris10_reload_firmware,
.request_smu_load_fw = smu7_reload_firmware,
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = polaris10_send_msg_to_smc,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = polaris10_update_smc_table,
.get_offsetof = polaris10_get_offsetof,
.process_firmware_header = polaris10_process_firmware_header,
.init_smc_table = polaris10_init_smc_table,
.update_sclk_threshold = polaris10_update_sclk_threshold,
.thermal_avfs_enable = polaris10_thermal_avfs_enable,
.thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
.populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
.populate_all_memory_levels = polaris10_populate_all_memory_levels,
.get_mac_definition = polaris10_get_mac_definition,
.is_dpm_running = polaris10_is_dpm_running,
};
int polaris10_smum_init(struct pp_smumgr *smumgr)
@ -997,7 +410,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr)
polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
if (polaris10_smu == NULL)
return -1;
return -EINVAL;
smumgr->backend = polaris10_smu;
smumgr->smumgr_funcs = &polaris10_smu_funcs;


@ -24,9 +24,13 @@
#ifndef _POLARIS10_SMUMANAGER_H
#define _POLARIS10_SMUMANAGER_H
#include <polaris10_ppsmc.h>
#include <pp_endian.h>
#include "smu74.h"
#include "smu74_discrete.h"
#include "smu7_smumgr.h"
#define SMC_RAM_END 0x40000
struct polaris10_avfs {
enum AVFS_BTC_STATUS avfs_btc_status;
@ -47,13 +51,7 @@ struct polaris10_pt_defaults {
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};
struct polaris10_buffer_entry {
uint32_t data_size;
uint32_t mc_addr_low;
uint32_t mc_addr_high;
void *kaddr;
unsigned long handle;
};
struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
@ -61,28 +59,17 @@ struct polaris10_range_table {
};
struct polaris10_smumgr {
uint8_t *header;
struct smu7_smumgr smu7_data;
uint8_t *mec_image;
struct polaris10_buffer_entry smu_buffer;
struct polaris10_buffer_entry header_buffer;
uint32_t soft_regs_start;
uint8_t *read_rrm_straps;
uint32_t read_drm_straps_mc_address_high;
uint32_t read_drm_straps_mc_address_low;
uint32_t acpi_optimization;
bool post_initial_boot;
uint8_t protected_mode;
uint8_t security_hard_key;
struct polaris10_avfs avfs;
SMU74_Discrete_DpmTable smc_state_table;
struct SMU74_Discrete_Ulv ulv_setting;
struct SMU74_Discrete_PmFuses power_tune_table;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
const struct polaris10_pt_defaults *power_tune_defaults;
uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
int polaris10_smum_init(struct pp_smumgr *smumgr);
int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
#endif


@ -0,0 +1,589 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
#define SMU7_SMC_SIZE 0x20000
static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
return 0;
}
int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
uint8_t *dest_byte;
uint8_t i, data_byte[4] = {0};
uint32_t *pdata = (uint32_t *)&data_byte;
PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
addr = smc_start_address;
while (byte_count >= 4) {
smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
dest += 1;
byte_count -= 4;
addr += 4;
}
if (byte_count) {
smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
for (i = 0; i < byte_count; i++)
dest_byte[i] = data_byte[i];
}
return 0;
}
int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
uint32_t data = 0;
uint32_t original_data;
uint32_t addr = 0;
uint32_t extra_shift;
PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
addr = smc_start_address;
while (byte_count >= 4) {
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
result = smu7_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
addr += 4;
}
if (0 != byte_count) {
data = 0;
result = smu7_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
/* Bytes are written into the SMC address space with the MSB first. */
data = (0x100 * data) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (original_data & ~((~0UL) << extra_shift));
result = smu7_set_smc_sram_address(smumgr, addr, limit);
if (0 != result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
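/*
 * Worked example for the tail handling above (illustrative values only, not
 * from the driver sources): with byte_count == 2 and src == {0xAA, 0xBB},
 * the loop builds data == 0x0000AABB, extra_shift == 8 * (4 - 2) == 16, so
 * data <<= 16 yields 0xAABB0000; the mask ~((~0UL) << 16) == 0xFFFF then
 * keeps the low 16 bits of original_data, so the partial write only replaces
 * the leading bytes of the final dword.
 */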
int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
{
return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
&& (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
int ret;
if (!smu7_is_smc_ram_running(smumgr))
return -EINVAL;
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
printk("\n failed to send pre message %x ret is %d \n", msg, ret);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
printk("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
}
int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
{
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
if (!smu7_is_smc_ram_running(smumgr)) {
return -EINVAL;
}
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
return smu7_send_msg_to_smc(smumgr, msg);
}
int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
}
int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
{
cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
printk("Failed to send Message.\n");
return 0;
}
int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
{
if (!smu7_is_smc_ram_running(smumgr))
return -EINVAL;
SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
switch (fw_type) {
case UCODE_ID_SMU:
result = CGS_UCODE_ID_SMU;
break;
case UCODE_ID_SMU_SK:
result = CGS_UCODE_ID_SMU_SK;
break;
case UCODE_ID_SDMA0:
result = CGS_UCODE_ID_SDMA0;
break;
case UCODE_ID_SDMA1:
result = CGS_UCODE_ID_SDMA1;
break;
case UCODE_ID_CP_CE:
result = CGS_UCODE_ID_CP_CE;
break;
case UCODE_ID_CP_PFP:
result = CGS_UCODE_ID_CP_PFP;
break;
case UCODE_ID_CP_ME:
result = CGS_UCODE_ID_CP_ME;
break;
case UCODE_ID_CP_MEC:
result = CGS_UCODE_ID_CP_MEC;
break;
case UCODE_ID_CP_MEC_JT1:
result = CGS_UCODE_ID_CP_MEC_JT1;
break;
case UCODE_ID_CP_MEC_JT2:
result = CGS_UCODE_ID_CP_MEC_JT2;
break;
case UCODE_ID_RLC_G:
result = CGS_UCODE_ID_RLC_G;
break;
default:
break;
}
return result;
}
int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
*value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
return 0;
}
int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
if (result)
return result;
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
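/*
 * Illustrative helper (hypothetical, not part of this commit): a
 * read-modify-write of one SMC SRAM dword built from the two accessors
 * above, passing the same explicit limit that the callers in this series
 * use.
 */
static int example_set_sram_bits(struct pp_smumgr *smumgr, uint32_t smc_addr,
		uint32_t mask, uint32_t limit)
{
	uint32_t val;
	int ret;

	ret = smu7_read_smc_sram_dword(smumgr, smc_addr, &val, limit);
	if (ret)
		return ret;

	val |= mask;	/* set the requested bits, leave the rest intact */

	return smu7_write_smc_sram_dword(smumgr, smc_addr, val, limit);
}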
/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
{
uint32_t result = 0;
switch (fw_type) {
case UCODE_ID_SDMA0:
result = UCODE_ID_SDMA0_MASK;
break;
case UCODE_ID_SDMA1:
result = UCODE_ID_SDMA1_MASK;
break;
case UCODE_ID_CP_CE:
result = UCODE_ID_CP_CE_MASK;
break;
case UCODE_ID_CP_PFP:
result = UCODE_ID_CP_PFP_MASK;
break;
case UCODE_ID_CP_ME:
result = UCODE_ID_CP_ME_MASK;
break;
case UCODE_ID_CP_MEC:
case UCODE_ID_CP_MEC_JT1:
case UCODE_ID_CP_MEC_JT2:
result = UCODE_ID_CP_MEC_MASK;
break;
case UCODE_ID_RLC_G:
result = UCODE_ID_RLC_G_MASK;
break;
default:
printk("UCode type is out of range! \n");
result = 0;
}
return result;
}
static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
result = cgs_get_firmware_info(smumgr->device,
smu7_convert_fw_type_to_cgs(fw_type),
&info);
if (!result) {
entry->version = info.version;
entry->id = (uint16_t)fw_type;
entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
entry->meta_data_addr_high = 0;
entry->meta_data_addr_low = 0;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
}
if (fw_type == UCODE_ID_RLC_G)
entry->flags = 1;
else
entry->flags = 0;
return 0;
}
int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
if (!smumgr->reload_fw) {
printk(KERN_INFO "[ powerplay ] skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
smu_data->soft_regs_start + smum_get_offsetof(smumgr,
SMU_SoftRegisters, UcodeLoadStatus),
0x0);
if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
+ UCODE_ID_CP_CE_MASK
+ UCODE_ID_CP_ME_MASK
+ UCODE_ID_CP_PFP_MASK
+ UCODE_ID_CP_MEC_MASK;
} else {
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
+ UCODE_ID_SDMA1_MASK
+ UCODE_ID_CP_CE_MASK
+ UCODE_ID_CP_ME_MASK
+ UCODE_ID_CP_PFP_MASK
+ UCODE_ID_CP_MEC_MASK
+ UCODE_ID_CP_MEC_JT1_MASK
+ UCODE_ID_CP_MEC_JT2_MASK;
}
toc = (struct SMU_DRAMData_TOC *)smu_data->header;
toc->num_entries = 0;
toc->structure_version = 1;
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
printk(KERN_ERR "Fail to Request SMU Load uCode");
return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(smumgr,
SMU_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
return ret;
}
int smu7_reload_firmware(struct pp_smumgr *smumgr)
{
return smumgr->smumgr_funcs->start_smu(smumgr);
}
static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
{
int result = 0;
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
cgs_get_firmware_info(smumgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
cgs_get_firmware_info(smumgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
}
int smu7_init(struct pp_smumgr *smumgr)
{
struct smu7_smumgr *smu_data;
uint8_t *internal_buf;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(smumgr->backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
smu_data->smu_buffer.data_size = 200*4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
smu_allocate_memory(smumgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&smu_data->header_buffer.kaddr,
&smu_data->header_buffer.handle);
smu_data->header = smu_data->header_buffer.kaddr;
smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
smu_allocate_memory(smumgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
&mc_addr,
&smu_data->smu_buffer.kaddr,
&smu_data->smu_buffer.handle);
internal_buf = smu_data->smu_buffer.kaddr;
smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
return 0;
}
int smu7_smu_fini(struct pp_smumgr *smumgr)
{
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
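/*
 * Sketch of the intended reuse pattern (hypothetical backend, not part of
 * this commit): an ASIC-specific smu_init now delegates buffer allocation to
 * smu7_init() and only layers its own state on top, the way
 * polaris10_smu_init() does earlier in this series.
 */
static int example_asic_smu_init(struct pp_smumgr *smumgr)
{
	if (smu7_init(smumgr))
		return -EINVAL;

	/* ASIC-specific setup (AVFS state, activity targets, ...) goes here */
	return 0;
}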


@ -0,0 +1,87 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _SMU7_SMUMANAGER_H
#define _SMU7_SMUMANAGER_H
#include <pp_endian.h>
#define SMC_RAM_END 0x40000
#define mmSMC_IND_INDEX_11 0x01AC
#define mmSMC_IND_DATA_11 0x01AD
struct smu7_buffer_entry {
uint32_t data_size;
uint32_t mc_addr_low;
uint32_t mc_addr_high;
void *kaddr;
unsigned long handle;
};
struct smu7_smumgr {
uint8_t *header;
uint8_t *mec_image;
struct smu7_buffer_entry smu_buffer;
struct smu7_buffer_entry header_buffer;
uint32_t soft_regs_start;
uint32_t dpm_table_start;
uint32_t mc_reg_table_start;
uint32_t fan_table_start;
uint32_t arb_table_start;
uint32_t ulv_setting_starts;
uint8_t security_hard_key;
uint32_t acpi_optimization;
};
int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
uint32_t parameter);
int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
uint16_t msg, uint32_t parameter);
int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit);
int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
int smu7_reload_firmware(struct pp_smumgr *smumgr);
int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
int smu7_init(struct pp_smumgr *smumgr);
int smu7_smu_fini(struct pp_smumgr *smumgr);
#endif
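/*
 * Minimal usage sketch (hypothetical caller, not part of this commit):
 * uploading a driver-side table into SMC RAM through the shared helper,
 * with SMC_RAM_END from this header as the bounds-check limit that
 * smu7_copy_bytes_to_smc() enforces.
 */
static int example_upload_table(struct pp_smumgr *smumgr, uint32_t smc_addr,
		const uint8_t *table, uint32_t size)
{
	/* the helper rejects unaligned addresses and writes past the limit */
	return smu7_copy_bytes_to_smc(smumgr, smc_addr, table, size, SMC_RAM_END);
}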


@ -86,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
return 0;
}
int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
return 0;
}
int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
return 0;
}
int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
return 0;
}
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
return 0;
}
uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
{
if (NULL != smumgr->smumgr_funcs->get_offsetof)
return smumgr->smumgr_funcs->get_offsetof(type, member);
return 0;
}
int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
return 0;
}
int smum_get_argument(struct pp_smumgr *smumgr)
{
if (NULL != smumgr->smumgr_funcs->get_argument)
@ -94,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
return 0;
}
uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
{
if (NULL != smumgr->smumgr_funcs->get_mac_definition)
return smumgr->smumgr_funcs->get_mac_definition(value);
return 0;
}
int smum_download_powerplay_table(struct pp_smumgr *smumgr,
void **table)
{
if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
table);
return 0;
}
@ -267,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
return 0;
}
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
return 0;
}
int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
return 0;
}
int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
return 0;
}
/*this interface is needed by island ci/vi */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
return 0;
}
bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
return true;
}
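/*
 * Illustrative call site (hypothetical, not part of this commit): with the
 * dispatchers above, ASIC-agnostic hwmgr code can drive SMU table setup
 * without knowing which smumgr backend is loaded; each wrapper is a no-op
 * when the backend does not implement the hook.
 */
static int example_setup_dpm_tables(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = smum_init_smc_table(hwmgr);
	if (ret)
		return ret;

	ret = smum_populate_all_graphic_levels(hwmgr);
	if (ret)
		return ret;

	return smum_populate_all_memory_levels(hwmgr);
}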

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff