drm/amd/pp: Delete the wrapper layer of smu_allocate/free_memory
use amdgpu_bo_create/free_kernel instead.

Reviewed-by: Alex Deucher <alexdeucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 807f93ac6a
commit ecc124b035
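The patch replaces the power-play wrapper with direct calls to the amdgpu kernel-BO helpers. A rough before/after sketch of the calling pattern (the variable names here are illustrative, not taken from the patch):

    /* Before: the smumgr wrapper hid three CGS calls (alloc, gmap for the
     * GPU address, kmap for the CPU address) behind one entry point. */
    ret = smu_allocate_memory(hwmgr->device, size,
                  CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE,
                  &mc_addr, &kaddr, &handle);

    /* After: one helper creates, pins, and maps the buffer object and
     * returns the bo pointer, GPU (MC) address, and CPU address together. */
    ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                  AMDGPU_GEM_DOMAIN_VRAM,
                  &bo, &mc_addr, &kaddr);

    /* Teardown is likewise a single call that unmaps, unpins, and frees. */
    amdgpu_bo_free_kernel(&bo, &mc_addr, &kaddr);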
@@ -106,13 +106,6 @@ extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
 
 extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 					uint16_t msg, uint32_t parameter);
 
-extern int smu_allocate_memory(void *device, uint32_t size,
-			 enum cgs_gpu_mem_type type,
-			 uint32_t byte_align, uint64_t *mc_addr,
-			 void **kptr, void *handle);
-
-extern int smu_free_memory(void *device, void *handle);
-
 extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
 
 extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
@@ -347,8 +347,8 @@ static int cz_smu_populate_single_scratch_task(
 		return -EINVAL;
 	}
 
-	task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
-	task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
+	task->addr.low = smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr);
+	task->addr.high = smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr);
 	task->size_bytes = cz_smu->scratch_buffer[i].data_size;
 
 	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
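smu_upper_32_bits()/smu_lower_32_bits() split the 64-bit MC address now stored in the buffer entry into the two halves the SMU mailbox expects. A minimal sketch of such helpers, assuming definitions along the lines of the kernel's generic upper_32_bits()/lower_32_bits() (the exact powerplay macros are not shown in this diff):

    /* Split a 64-bit GPU (MC) address into 32-bit message parameters.
     * The double shift avoids an undefined 32-bit shift count when the
     * argument happens to be only 32 bits wide. */
    #define smu_lower_32_bits(n)   ((uint32_t)(n))
    #define smu_upper_32_bits(n)   ((uint32_t)(((n) >> 16) >> 16))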
@@ -384,8 +384,8 @@ static int cz_smu_populate_single_ucode_load_task(
 		return -EINVAL;
 	}
 
-	task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
-	task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
+	task->addr.low = smu_lower_32_bits(cz_smu->driver_buffer[i].mc_addr);
+	task->addr.high = smu_upper_32_bits(cz_smu->driver_buffer[i].mc_addr);
 	task->size_bytes = cz_smu->driver_buffer[i].data_size;
 
 	return 0;
@@ -566,11 +566,7 @@ static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
 				ucode_id, &info);
 
 		if (ret == 0) {
-			cz_smu->driver_buffer[i].mc_addr_high =
-					smu_upper_32_bits(info.mc_addr);
-
-			cz_smu->driver_buffer[i].mc_addr_low =
-					smu_lower_32_bits(info.mc_addr);
+			cz_smu->driver_buffer[i].mc_addr = info.mc_addr;
 
 			cz_smu->driver_buffer[i].data_size = info.image_size;
@@ -589,19 +585,12 @@ static int cz_smu_populate_single_scratch_entry(
 		struct cz_buffer_entry *entry)
 {
 	struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
-	long long mc_addr =
-			((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
-			| cz_smu->smu_buffer.mc_addr_low;
-
 	uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
 
-	mc_addr += cz_smu->smu_buffer_used_bytes;
-
 	entry->data_size = ulsize_byte;
 	entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
 				cz_smu->smu_buffer_used_bytes;
-	entry->mc_addr_low = smu_lower_32_bits(mc_addr);
-	entry->mc_addr_high = smu_upper_32_bits(mc_addr);
+	entry->mc_addr = cz_smu->smu_buffer.mc_addr + cz_smu->smu_buffer_used_bytes;
 	entry->firmware_ID = scratch_type;
 
 	cz_smu->smu_buffer_used_bytes += ulsize_aligned;
@@ -624,11 +613,11 @@ static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetClkTableAddrHi,
-				cz_smu->scratch_buffer[i].mc_addr_high);
+				smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetClkTableAddrLo,
-				cz_smu->scratch_buffer[i].mc_addr_low);
+				smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
 
 	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
 				cz_smu->toc_entry_clock_table);
@@ -651,11 +640,11 @@ static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetClkTableAddrHi,
-				cz_smu->scratch_buffer[i].mc_addr_high);
+				smu_upper_32_bits(cz_smu->scratch_buffer[i].mc_addr));
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetClkTableAddrLo,
-				cz_smu->scratch_buffer[i].mc_addr_low);
+				smu_lower_32_bits(cz_smu->scratch_buffer[i].mc_addr));
 
 	cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
 				cz_smu->toc_entry_clock_table);
@@ -686,11 +675,11 @@ static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DriverDramAddrHi,
-				cz_smu->toc_buffer.mc_addr_high);
+				smu_upper_32_bits(cz_smu->toc_buffer.mc_addr));
 
 	cz_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_DriverDramAddrLo,
-				cz_smu->toc_buffer.mc_addr_low);
+				smu_lower_32_bits(cz_smu->toc_buffer.mc_addr));
 
 	cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
 
@@ -750,7 +739,6 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr)
 
 static int cz_smu_init(struct pp_hwmgr *hwmgr)
 {
-	uint64_t mc_addr = 0;
 	int ret = 0;
 	struct cz_smumgr *cz_smu;
 
@@ -768,31 +756,29 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 			ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
 			ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
 
-	ret = smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				cz_smu->toc_buffer.data_size,
-				CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 				PAGE_SIZE,
-				&mc_addr,
-				&cz_smu->toc_buffer.kaddr,
-				&cz_smu->toc_buffer.handle);
-	if (ret != 0)
-		return -1;
-
-	cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->toc_buffer.handle,
+				&cz_smu->toc_buffer.mc_addr,
+				&cz_smu->toc_buffer.kaddr);
+	if (ret)
+		return -EINVAL;
 
-	ret = smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				cz_smu->smu_buffer.data_size,
-				CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 				PAGE_SIZE,
-				&mc_addr,
-				&cz_smu->smu_buffer.kaddr,
-				&cz_smu->smu_buffer.handle);
-	if (ret != 0)
-		return -1;
-
-	cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&cz_smu->smu_buffer.handle,
+				&cz_smu->smu_buffer.mc_addr,
+				&cz_smu->smu_buffer.kaddr);
+	if (ret) {
+		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+					&cz_smu->toc_buffer.mc_addr,
+					&cz_smu->toc_buffer.kaddr);
+		return -EINVAL;
+	}
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
 		CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
@@ -845,10 +831,12 @@ static int cz_smu_fini(struct pp_hwmgr *hwmgr)
 
 	cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
 	if (cz_smu) {
-		cgs_free_gpu_mem(hwmgr->device,
-				cz_smu->toc_buffer.handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				cz_smu->smu_buffer.handle);
+		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+					&cz_smu->toc_buffer.mc_addr,
+					&cz_smu->toc_buffer.kaddr);
+		amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
+					&cz_smu->smu_buffer.mc_addr,
+					&cz_smu->smu_buffer.kaddr);
 		kfree(cz_smu);
 	}
 
@@ -60,11 +60,10 @@ enum cz_scratch_entry {
 
 struct cz_buffer_entry {
 	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
+	uint64_t mc_addr;
 	void *kaddr;
 	enum cz_scratch_entry firmware_ID;
-	unsigned long handle; /* as bo handle used when release bo */
+	struct amdgpu_bo *handle; /* as bo handle used when release bo */
 };
 
 struct cz_register_index_data_pair {
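The same structural change repeats in every smumgr backend: each tracked buffer now keeps exactly the triple that amdgpu_bo_create_kernel() fills in and that amdgpu_bo_free_kernel() tears down. Reduced to a sketch (the struct below is illustrative, not one of the patched types):

    struct pp_buffer {
        struct amdgpu_bo *handle; /* buffer object, released at fini time */
        uint64_t mc_addr;         /* GPU virtual address handed to the SMU */
        void *kaddr;              /* CPU mapping used by the driver */
    };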
@@ -169,11 +169,11 @@ int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			"Invalid SMU Table Length!", return -EINVAL;);
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
-			priv->smu_tables.entry[table_id].table_addr_high) == 0,
+			smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
-			priv->smu_tables.entry[table_id].table_addr_low) == 0,
+			smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
 			return -EINVAL;);
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
@@ -182,7 +182,7 @@ int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
 			return -EINVAL;);
 
-	memcpy(table, priv->smu_tables.entry[table_id].table,
+	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
 			priv->smu_tables.entry[table_id].size);
 
 	return 0;
@@ -206,12 +206,12 @@ int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
-			priv->smu_tables.entry[table_id].table_addr_high) == 0,
+			smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
 			return -EINVAL;);
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
-			priv->smu_tables.entry[table_id].table_addr_low) == 0,
+			smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
 			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
 			return -EINVAL;);
 	PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
@@ -292,10 +292,12 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr)
 	if (priv) {
 		rv_smc_disable_sdma(hwmgr);
 		rv_smc_disable_vcn(hwmgr);
-		cgs_free_gpu_mem(hwmgr->device,
-				priv->smu_tables.entry[WMTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				priv->smu_tables.entry[CLOCKTABLE].handle);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					priv->smu_tables.entry[WMTABLE].table);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[CLOCKTABLE].handle,
+					&priv->smu_tables.entry[CLOCKTABLE].mc_addr,
+					priv->smu_tables.entry[CLOCKTABLE].table);
 		kfree(hwmgr->smu_backend);
 		hwmgr->smu_backend = NULL;
 	}
@@ -328,7 +330,8 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 	struct rv_smumgr *priv;
 	uint64_t mc_addr;
 	void *kaddr = NULL;
-	unsigned long handle;
+	struct amdgpu_bo *handle;
+	int r;
 
 	priv = kzalloc(sizeof(struct rv_smumgr), GFP_KERNEL);
 
@@ -338,54 +341,44 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = priv;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(Watermarks_t),
-			CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[rv_smu_init] Out of memory for wmtable.",
-			kfree(hwmgr->smu_backend);
-			hwmgr->smu_backend = NULL;
-			return -EINVAL);
+	if (r)
+		return -EINVAL;
 
 	priv->smu_tables.entry[WMTABLE].version = 0x01;
 	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
 	priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
-	priv->smu_tables.entry[WMTABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[WMTABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[WMTABLE].table = kaddr;
 	priv->smu_tables.entry[WMTABLE].handle = handle;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(DpmClocks_t),
-			CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[rv_smu_init] Out of memory for CLOCKTABLE.",
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			kfree(hwmgr->smu_backend);
-			hwmgr->smu_backend = NULL;
-			return -EINVAL);
+	if (r) {
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					&priv->smu_tables.entry[WMTABLE].table);
+		return -EINVAL;
+	}
 
 	priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
 	priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
 	priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
-	priv->smu_tables.entry[CLOCKTABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[CLOCKTABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[CLOCKTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[CLOCKTABLE].table = kaddr;
 	priv->smu_tables.entry[CLOCKTABLE].handle = handle;
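Note the error handling that comes with dropping the wrapper: each later allocation must now undo the earlier ones by hand when it fails. For two buffers the unwind can stay inline, as rv_smu_init() does above; a condensed sketch with illustrative names:

    r = amdgpu_bo_create_kernel(adev, size_a, PAGE_SIZE,
                AMDGPU_GEM_DOMAIN_VRAM, &bo_a, &va_a, &cpu_a);
    if (r)
        return -EINVAL;

    r = amdgpu_bo_create_kernel(adev, size_b, PAGE_SIZE,
                AMDGPU_GEM_DOMAIN_VRAM, &bo_b, &va_b, &cpu_b);
    if (r) {
        /* second allocation failed: release the first before bailing out */
        amdgpu_bo_free_kernel(&bo_a, &va_a, &cpu_a);
        return -EINVAL;
    }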
@@ -37,10 +37,9 @@ struct smu_table_entry {
 	uint32_t version;
 	uint32_t size;
 	uint32_t table_id;
-	uint32_t table_addr_high;
-	uint32_t table_addr_low;
-	uint8_t *table;
-	unsigned long handle;
+	uint64_t mc_addr;
+	void *table;
+	struct amdgpu_bo *handle;
 };
 
 struct smu_table_array {
@@ -412,10 +412,10 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	if (!cgs_is_virtualization_enabled(hwmgr->device)) {
 		smu7_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SMU_DRAM_ADDR_HI,
-					smu_data->smu_buffer.mc_addr_high);
+					smu_upper_32_bits(smu_data->smu_buffer.mc_addr));
 		smu7_send_msg_to_smc_with_parameter(hwmgr,
 					PPSMC_MSG_SMU_DRAM_ADDR_LO,
-					smu_data->smu_buffer.mc_addr_low);
+					smu_lower_32_bits(smu_data->smu_buffer.mc_addr));
 	}
 	fw_to_load = UCODE_ID_RLC_G_MASK
 		   + UCODE_ID_SDMA0_MASK
@@ -472,8 +472,8 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
 
-	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
-	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_upper_32_bits(smu_data->header_buffer.mc_addr));
+	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_lower_32_bits(smu_data->header_buffer.mc_addr));
 
 	if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
 		pr_err("Fail to Request SMU Load uCode");
@@ -587,7 +587,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 	struct smu7_smumgr *smu_data;
 	uint8_t *internal_buf;
 	uint64_t mc_addr = 0;
-
+	int r;
 	/* Allocate memory for backend private data */
 	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 	smu_data->header_buffer.data_size =
@@ -595,47 +595,40 @@ int smu7_init(struct pp_hwmgr *hwmgr)
 
 	/* Allocate FW image data structure and header buffer and
	 * send the header buffer address to SMU */
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 		smu_data->header_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 		PAGE_SIZE,
+		AMDGPU_GEM_DOMAIN_VRAM,
+		&smu_data->header_buffer.handle,
 		&mc_addr,
-		&smu_data->header_buffer.kaddr,
-		&smu_data->header_buffer.handle);
+		&smu_data->header_buffer.kaddr);
+
+	if (r)
+		return -EINVAL;
 
 	smu_data->header = smu_data->header_buffer.kaddr;
-	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
-		"Out of memory.",
-		kfree(hwmgr->smu_backend);
-		cgs_free_gpu_mem(hwmgr->device,
-		(cgs_handle_t)smu_data->header_buffer.handle);
-		return -EINVAL);
+	smu_data->header_buffer.mc_addr = mc_addr;
 
 	if (cgs_is_virtualization_enabled(hwmgr->device))
 		return 0;
 
 	smu_data->smu_buffer.data_size = 200*4096;
-	smu_allocate_memory(hwmgr->device,
+	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 		smu_data->smu_buffer.data_size,
-		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 		PAGE_SIZE,
+		AMDGPU_GEM_DOMAIN_VRAM,
+		&smu_data->smu_buffer.handle,
 		&mc_addr,
-		&smu_data->smu_buffer.kaddr,
-		&smu_data->smu_buffer.handle);
+		&smu_data->smu_buffer.kaddr);
+
+	if (r) {
+		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+					&smu_data->header_buffer.mc_addr,
+					&smu_data->header_buffer.kaddr);
+		return -EINVAL;
+	}
 	internal_buf = smu_data->smu_buffer.kaddr;
-	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
-	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
-	PP_ASSERT_WITH_CODE((NULL != internal_buf),
-		"Out of memory.",
-		kfree(hwmgr->smu_backend);
-		cgs_free_gpu_mem(hwmgr->device,
-		(cgs_handle_t)smu_data->smu_buffer.handle);
-		return -EINVAL);
+	smu_data->smu_buffer.mc_addr = mc_addr;
 
 	if (smum_is_hw_avfs_present(hwmgr))
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
@@ -650,9 +643,14 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
 
-	smu_free_memory(hwmgr->device, (void *) smu_data->header_buffer.handle);
+	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
+				&smu_data->header_buffer.mc_addr,
+				&smu_data->header_buffer.kaddr);
 
 	if (!cgs_is_virtualization_enabled(hwmgr->device))
-		smu_free_memory(hwmgr->device, (void *) smu_data->smu_buffer.handle);
+		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
+					&smu_data->smu_buffer.mc_addr,
+					&smu_data->smu_buffer.kaddr);
 
 	kfree(hwmgr->smu_backend);
 	hwmgr->smu_backend = NULL;
@@ -31,10 +31,9 @@
 
 struct smu7_buffer_entry {
 	uint32_t data_size;
-	uint32_t mc_addr_low;
-	uint32_t mc_addr_high;
+	uint64_t mc_addr;
 	void *kaddr;
-	unsigned long handle;
+	struct amdgpu_bo *handle;
 };
 
 struct smu7_avfs {
@@ -144,57 +144,6 @@ int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 			hwmgr, msg, parameter);
 }
 
-int smu_allocate_memory(void *device, uint32_t size,
-			 enum cgs_gpu_mem_type type,
-			 uint32_t byte_align, uint64_t *mc_addr,
-			 void **kptr, void *handle)
-{
-	int ret = 0;
-	cgs_handle_t cgs_handle;
-
-	if (device == NULL || handle == NULL ||
-	mc_addr == NULL || kptr == NULL)
-		return -EINVAL;
-
-	ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
-			(cgs_handle_t *)handle);
-	if (ret)
-		return -ENOMEM;
-
-	cgs_handle = *(cgs_handle_t *)handle;
-
-	ret = cgs_gmap_gpu_mem(device, cgs_handle, mc_addr);
-	if (ret)
-		goto error_gmap;
-
-	ret = cgs_kmap_gpu_mem(device, cgs_handle, kptr);
-	if (ret)
-		goto error_kmap;
-
-	return 0;
-
-error_kmap:
-	cgs_gunmap_gpu_mem(device, cgs_handle);
-
-error_gmap:
-	cgs_free_gpu_mem(device, cgs_handle);
-	return ret;
-}
-
-int smu_free_memory(void *device, void *handle)
-{
-	cgs_handle_t cgs_handle = (cgs_handle_t)handle;
-
-	if (device == NULL || handle == NULL)
-		return -EINVAL;
-
-	cgs_kunmap_gpu_mem(device, cgs_handle);
-	cgs_gunmap_gpu_mem(device, cgs_handle);
-	cgs_free_gpu_mem(device, cgs_handle);
-
-	return 0;
-}
-
 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
 {
 	if (NULL != hwmgr->smumgr_funcs->init_smc_table)
@@ -230,10 +230,10 @@ int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 			"Invalid SMU Table Length!", return -EINVAL);
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
-			priv->smu_tables.entry[table_id].table_addr_high);
+			smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
-			priv->smu_tables.entry[table_id].table_addr_low);
+			smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_TransferTableSmu2Dram,
 			priv->smu_tables.entry[table_id].table_id);
@@ -267,10 +267,10 @@ int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrHigh,
-			priv->smu_tables.entry[table_id].table_addr_high);
+			smu_upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_SetDriverDramAddrLow,
-			priv->smu_tables.entry[table_id].table_addr_low);
+			smu_lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
 	vega10_send_msg_to_smc_with_parameter(hwmgr,
 			PPSMC_MSG_TransferTableDram2Smu,
 			priv->smu_tables.entry[table_id].table_id);
@@ -334,14 +334,13 @@ int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
 	struct vega10_smumgr *priv =
 			(struct vega10_smumgr *)(hwmgr->smu_backend);
 
-	if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
-		priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
+	if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
 		vega10_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetToolsDramAddrHigh,
-				priv->smu_tables.entry[TOOLSTABLE].table_addr_high);
+				smu_upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
 		vega10_send_msg_to_smc_with_parameter(hwmgr,
 				PPSMC_MSG_SetToolsDramAddrLow,
-				priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
+				smu_lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
 	}
 	return 0;
 }
@@ -381,7 +380,8 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	struct vega10_smumgr *priv;
 	uint64_t mc_addr;
 	void *kaddr = NULL;
-	unsigned long handle, tools_size;
+	unsigned long tools_size;
+	struct amdgpu_bo *handle;
 	int ret;
 	struct cgs_firmware_info info = {0};
 
@@ -399,147 +399,119 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = priv;
 
 	/* allocate space for pptable */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(PPTable_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for pptable.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+	if (ret)
+		return -EINVAL;
 
 	priv->smu_tables.entry[PPTABLE].version = 0x01;
 	priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
 	priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE;
-	priv->smu_tables.entry[PPTABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[PPTABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[PPTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[PPTABLE].table = kaddr;
 	priv->smu_tables.entry[PPTABLE].handle = handle;
 
 	/* allocate space for watermarks table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(Watermarks_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for wmtable.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+	if (ret)
+		goto err0;
 
 	priv->smu_tables.entry[WMTABLE].version = 0x01;
 	priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
 	priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS;
-	priv->smu_tables.entry[WMTABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[WMTABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[WMTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[WMTABLE].table = kaddr;
 	priv->smu_tables.entry[WMTABLE].handle = handle;
 
 	/* allocate space for AVFS table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(AvfsTable_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
+			&kaddr);
 
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for avfs table.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+	if (ret)
+		goto err1;
 
 	priv->smu_tables.entry[AVFSTABLE].version = 0x01;
 	priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
 	priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS;
-	priv->smu_tables.entry[AVFSTABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[AVFSTABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[AVFSTABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[AVFSTABLE].table = kaddr;
 	priv->smu_tables.entry[AVFSTABLE].handle = handle;
 
 	tools_size = 0x19000;
 	if (tools_size) {
-		smu_allocate_memory(hwmgr->device,
+		ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				tools_size,
-				CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 				PAGE_SIZE,
+				AMDGPU_GEM_DOMAIN_VRAM,
+				&handle,
 				&mc_addr,
-				&kaddr,
-				&handle);
-
-		if (kaddr) {
-			priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
-			priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
-			priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
-			priv->smu_tables.entry[TOOLSTABLE].table_addr_high =
-					smu_upper_32_bits(mc_addr);
-			priv->smu_tables.entry[TOOLSTABLE].table_addr_low =
-					smu_lower_32_bits(mc_addr);
-			priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
-			priv->smu_tables.entry[TOOLSTABLE].handle = handle;
-		}
+				&kaddr);
+		if (ret)
+			goto err2;
+		priv->smu_tables.entry[TOOLSTABLE].version = 0x01;
+		priv->smu_tables.entry[TOOLSTABLE].size = tools_size;
+		priv->smu_tables.entry[TOOLSTABLE].table_id = TABLE_PMSTATUSLOG;
+		priv->smu_tables.entry[TOOLSTABLE].mc_addr = mc_addr;
+		priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
+		priv->smu_tables.entry[TOOLSTABLE].handle = handle;
 	}
 
 	/* allocate space for AVFS Fuse table */
-	smu_allocate_memory(hwmgr->device,
+	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 			sizeof(AvfsFuseOverride_t),
-			CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
 			PAGE_SIZE,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			&handle,
 			&mc_addr,
-			&kaddr,
-			&handle);
-
-	PP_ASSERT_WITH_CODE(kaddr,
-			"[vega10_smu_init] Out of memory for avfs fuse table.",
-			kfree(hwmgr->smu_backend);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
-			cgs_free_gpu_mem(hwmgr->device,
-			(cgs_handle_t)handle);
-			return -EINVAL);
+			&kaddr);
+	if (ret)
+		goto err3;
 
 	priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
 	priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
 	priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
-	priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high =
-			smu_upper_32_bits(mc_addr);
-	priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low =
-			smu_lower_32_bits(mc_addr);
+	priv->smu_tables.entry[AVFSFUSETABLE].mc_addr = mc_addr;
 	priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
 	priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
 
 	return 0;
 
+err3:
+	if (priv->smu_tables.entry[TOOLSTABLE].table)
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+				&priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+				&priv->smu_tables.entry[TOOLSTABLE].table);
+err2:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+			&priv->smu_tables.entry[AVFSTABLE].mc_addr,
+			&priv->smu_tables.entry[AVFSTABLE].table);
+err1:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+			&priv->smu_tables.entry[WMTABLE].mc_addr,
+			&priv->smu_tables.entry[WMTABLE].table);
+err0:
+	amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+			&priv->smu_tables.entry[PPTABLE].mc_addr,
+			&priv->smu_tables.entry[PPTABLE].table);
+	return -EINVAL;
 }
 
 static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
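With five tables to allocate, vega10_smu_init() switches from inline unwinding to the usual kernel goto ladder: each err label frees everything allocated before the failing step, in reverse order, so a single exit path covers every failure point. The skeleton of the idiom, using the illustrative pp_buffer from earlier:

    static int example_alloc_tables(struct amdgpu_device *adev,
                    struct pp_buffer *a, struct pp_buffer *b)
    {
        if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &a->handle, &a->mc_addr, &a->kaddr))
            return -EINVAL;

        if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM,
                        &b->handle, &b->mc_addr, &b->kaddr))
            goto err0;

        return 0;

    err0:   /* unwind in reverse allocation order */
        amdgpu_bo_free_kernel(&a->handle, &a->mc_addr, &a->kaddr);
        return -EINVAL;
    }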
@@ -548,17 +520,22 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
 			(struct vega10_smumgr *)(hwmgr->smu_backend);
 
 	if (priv) {
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[PPTABLE].handle,
+					&priv->smu_tables.entry[PPTABLE].mc_addr,
+					&priv->smu_tables.entry[PPTABLE].table);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
+					&priv->smu_tables.entry[WMTABLE].mc_addr,
+					&priv->smu_tables.entry[WMTABLE].table);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSTABLE].handle,
+					&priv->smu_tables.entry[AVFSTABLE].mc_addr,
+					&priv->smu_tables.entry[AVFSTABLE].table);
 		if (priv->smu_tables.entry[TOOLSTABLE].table)
-			cgs_free_gpu_mem(hwmgr->device,
-					(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
-		cgs_free_gpu_mem(hwmgr->device,
-				(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
+			amdgpu_bo_free_kernel(&priv->smu_tables.entry[TOOLSTABLE].handle,
+					&priv->smu_tables.entry[TOOLSTABLE].mc_addr,
+					&priv->smu_tables.entry[TOOLSTABLE].table);
+		amdgpu_bo_free_kernel(&priv->smu_tables.entry[AVFSFUSETABLE].handle,
+					&priv->smu_tables.entry[AVFSFUSETABLE].mc_addr,
+					&priv->smu_tables.entry[AVFSFUSETABLE].table);
 		kfree(hwmgr->smu_backend);
 		hwmgr->smu_backend = NULL;
 	}
@@ -38,10 +38,9 @@ struct smu_table_entry {
 	uint32_t version;
 	uint32_t size;
 	uint32_t table_id;
-	uint32_t table_addr_high;
-	uint32_t table_addr_low;
-	uint8_t *table;
-	unsigned long handle;
+	uint64_t mc_addr;
+	void *table;
+	struct amdgpu_bo *handle;
 };
 
 struct smu_table_array {