Merge tag 'amd-drm-next-5.15-2021-08-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.15-2021-08-27:

amdgpu:
- PLL fix for SI
- Misc code cleanups
- RAS fixes
- PSP cleanups
- Polaris UVD/VCE suspend fixes
- aldebaran fixes
- DCN3.x mclk fixes

amdkfd:
- CWSR fixes for arcturus and aldebaran
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210827192336.4649-1-alexander.deucher@amd.com
commit 8f0284f190
@@ -58,7 +58,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
-	amdgpu_eeprom.o
+	amdgpu_eeprom.o amdgpu_mca.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
@@ -189,6 +189,10 @@ amdgpu-y += \
 amdgpu-y += \
 	amdgpu_reset.o
 
+# add MCA block
+amdgpu-y += \
+	mca_v3_0.o
+
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
 
@@ -108,6 +108,7 @@
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
 #include "amdgpu_fdinfo.h"
+#include "amdgpu_mca.h"
 
 #define MAX_GPU_INSTANCE 16
 
@@ -1009,6 +1010,9 @@ struct amdgpu_device {
 	/* df */
 	struct amdgpu_df df;
 
+	/* MCA */
+	struct amdgpu_mca mca;
+
 	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
 	uint32_t harvest_ip_mask;
 	int num_ip_blocks;

@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
 		if (adev->flags & AMD_IS_APU)
 			return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;

@@ -44,4 +44,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };

@@ -305,5 +305,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 				kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
 	.set_vm_context_page_table_base =
 				kgd_gfx_v9_set_vm_context_page_table_base,
-	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
+	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };

@@ -882,7 +882,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
 			adev->gfx.cu_info.max_waves_per_simd;
 }
 
-static void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
 		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);

@@ -65,3 +65,5 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
 		uint32_t vmid, uint64_t page_table_base);
 void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
 		int *pasid_wave_cnt, int *max_waves_per_cu);
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);

@@ -76,7 +76,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 	if (adev->dummy_page_addr)
 		return 0;
 	adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
-					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+					     PAGE_SIZE, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
 		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
 		adev->dummy_page_addr = 0;
@@ -96,8 +96,8 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
 {
 	if (!adev->dummy_page_addr)
 		return;
-	pci_unmap_page(adev->pdev, adev->dummy_page_addr,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+		       DMA_BIDIRECTIONAL);
 	adev->dummy_page_addr = 0;
 }
 

@@ -904,7 +904,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 			     DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
-	domain = amdgpu_bo_get_preferred_pin_domain(adev,
+	domain = amdgpu_bo_get_preferred_domain(adev,
 				amdgpu_display_supported_domains(adev, flags));
 	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
 				     ttm_bo_type_device, NULL, &gobj);

@@ -471,6 +471,27 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 			return r;
 	}
 
+	if (adev->mca.mp0.ras_funcs &&
+	    adev->mca.mp0.ras_funcs->ras_late_init) {
+		r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
+		if (r)
+			return r;
+	}
+
+	if (adev->mca.mp1.ras_funcs &&
+	    adev->mca.mp1.ras_funcs->ras_late_init) {
+		r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
+		if (r)
+			return r;
+	}
+
+	if (adev->mca.mpio.ras_funcs &&
+	    adev->mca.mpio.ras_funcs->ras_late_init) {
+		r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
 

@@ -47,8 +47,6 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
 {
 	int i;
 
-	cancel_delayed_work_sync(&adev->jpeg.idle_work);
-
 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
 		if (adev->jpeg.harvest_config & (1 << i))
 			continue;

@@ -0,0 +1,117 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+					      uint64_t mc_status_addr,
+					      unsigned long *error_count)
+{
+	uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+		*error_count += 1;
+}
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+						uint64_t mc_status_addr,
+						unsigned long *error_count)
+{
+	uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+		*error_count += 1;
+}
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+				  uint64_t mc_status_addr)
+{
+	WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+}
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+				      uint64_t mc_status_addr,
+				      void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
+	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
+
+	amdgpu_mca_reset_error_count(adev, mc_status_addr);
+}
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+			     struct amdgpu_mca_ras *mca_dev)
+{
+	int r;
+	struct ras_ih_if ih_info = {
+		.cb = NULL,
+	};
+	struct ras_fs_if fs_info = {
+		.sysfs_name = mca_dev->ras_funcs->sysfs_name,
+	};
+
+	if (!mca_dev->ras_if) {
+		mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+		if (!mca_dev->ras_if)
+			return -ENOMEM;
+		mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+		mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+		mca_dev->ras_if->sub_block_index = 0;
+	}
+	ih_info.head = fs_info.head = *mca_dev->ras_if;
+	r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
+				 &fs_info, &ih_info);
+	if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
+		kfree(mca_dev->ras_if);
+		mca_dev->ras_if = NULL;
+	}
+
+	return r;
+}
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+			 struct amdgpu_mca_ras *mca_dev)
+{
+	struct ras_ih_if ih_info = {
+		.cb = NULL,
+	};
+
+	if (!mca_dev->ras_if)
+		return;
+
+	amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
+	kfree(mca_dev->ras_if);
+	mca_dev->ras_if = NULL;
+}

@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MCA_H__
+#define __AMDGPU_MCA_H__
+
+struct amdgpu_mca_ras_funcs {
+	int (*ras_late_init)(struct amdgpu_device *adev);
+	void (*ras_fini)(struct amdgpu_device *adev);
+	void (*query_ras_error_count)(struct amdgpu_device *adev,
+				      void *ras_error_status);
+	void (*query_ras_error_address)(struct amdgpu_device *adev,
+					void *ras_error_status);
+	uint32_t ras_block;
+	const char* sysfs_name;
+};
+
+struct amdgpu_mca_ras {
+	struct ras_common_if *ras_if;
+	const struct amdgpu_mca_ras_funcs *ras_funcs;
+};
+
+struct amdgpu_mca_funcs {
+	void (*init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mca {
+	const struct amdgpu_mca_funcs *funcs;
+	struct amdgpu_mca_ras mp0;
+	struct amdgpu_mca_ras mp1;
+	struct amdgpu_mca_ras mpio;
+};
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+					      uint64_t mc_status_addr,
+					      unsigned long *error_count);
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+						uint64_t mc_status_addr,
+						unsigned long *error_count);
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+				  uint64_t mc_status_addr);
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+				      uint64_t mc_status_addr,
+				      void *ras_error_status);
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+			     struct amdgpu_mca_ras *mca_dev);
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+			 struct amdgpu_mca_ras *mca_dev);
+
+#endif

@ -947,7 +947,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
|
|||
/* This assumes only APU display buffers are pinned with (VRAM|GTT).
|
||||
* See function amdgpu_display_supported_domains()
|
||||
*/
|
||||
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
|
||||
domain = amdgpu_bo_get_preferred_domain(adev, domain);
|
||||
|
||||
if (bo->tbo.base.import_attach)
|
||||
dma_buf_pin(bo->tbo.base.import_attach);
|
||||
|
@ -1518,14 +1518,14 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
|
|||
}
|
||||
|
||||
/**
|
||||
* amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
|
||||
* amdgpu_bo_get_preferred_domain - get preferred domain
|
||||
* @adev: amdgpu device object
|
||||
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
|
||||
*
|
||||
* Returns:
|
||||
* Which of the allowed domains is preferred for pinning the BO for scanout.
|
||||
* Which of the allowed domains is preferred for allocating the BO.
|
||||
*/
|
||||
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
|
||||
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
|
||||
uint32_t domain)
|
||||
{
|
||||
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
|
||||
|
|
|
@ -333,7 +333,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
|
|||
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
|
||||
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
|
||||
struct dma_fence **fence);
|
||||
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
|
||||
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
|
||||
uint32_t domain);
|
||||
|
||||
/*
|
||||
|
|
|
@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
|
|||
* Calculate feedback and reference divider for a given post divider. Makes
|
||||
* sure we stay within the limits.
|
||||
*/
|
||||
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
|
||||
unsigned fb_div_max, unsigned ref_div_max,
|
||||
unsigned *fb_div, unsigned *ref_div)
|
||||
static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
|
||||
unsigned int den, unsigned int post_div,
|
||||
unsigned int fb_div_max, unsigned int ref_div_max,
|
||||
unsigned int *fb_div, unsigned int *ref_div)
|
||||
{
|
||||
|
||||
/* limit reference * post divider to a maximum */
|
||||
ref_div_max = min(128 / post_div, ref_div_max);
|
||||
if (adev->family == AMDGPU_FAMILY_SI)
|
||||
ref_div_max = min(100 / post_div, ref_div_max);
|
||||
else
|
||||
ref_div_max = min(128 / post_div, ref_div_max);
|
||||
|
||||
/* get matching reference and feedback divider */
|
||||
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
|
||||
|
@ -112,7 +117,8 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
|
|||
* Try to calculate the PLL parameters to generate the given frequency:
|
||||
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
|
||||
*/
|
||||
void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
void amdgpu_pll_compute(struct amdgpu_device *adev,
|
||||
struct amdgpu_pll *pll,
|
||||
u32 freq,
|
||||
u32 *dot_clock_p,
|
||||
u32 *fb_div_p,
|
||||
|
@ -199,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
|||
|
||||
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
|
||||
unsigned diff;
|
||||
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
|
||||
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
|
||||
ref_div_max, &fb_div, &ref_div);
|
||||
diff = abs(target_clock - (pll->reference_freq * fb_div) /
|
||||
(ref_div * post_div));
|
||||
|
@ -214,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
|||
post_div = post_div_best;
|
||||
|
||||
/* get the feedback and reference divider for the optimal value */
|
||||
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
|
||||
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
|
||||
&fb_div, &ref_div);
|
||||
|
||||
/* reduce the numbers to a simpler ratio once more */
|
||||
|
|
|
@ -24,7 +24,8 @@
|
|||
#ifndef __AMDGPU_PLL_H__
|
||||
#define __AMDGPU_PLL_H__
|
||||
|
||||
void amdgpu_pll_compute(struct amdgpu_pll *pll,
|
||||
void amdgpu_pll_compute(struct amdgpu_device *adev,
|
||||
struct amdgpu_pll *pll,
|
||||
u32 freq,
|
||||
u32 *dot_clock_p,
|
||||
u32 *fb_div_p,
|
||||
|
|
|
@ -899,23 +899,37 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
|
|||
cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
|
||||
}
|
||||
|
||||
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
|
||||
static int psp_ta_init_shared_buf(struct psp_context *psp,
|
||||
struct ta_mem_context *mem_ctx,
|
||||
uint32_t shared_mem_size)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for xgmi ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->xgmi_context.context.mem_context.shared_bo,
|
||||
&psp->xgmi_context.context.mem_context.shared_mc_addr,
|
||||
&psp->xgmi_context.context.mem_context.shared_buf);
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for ta to host memory
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&mem_ctx->shared_bo,
|
||||
&mem_ctx->shared_mc_addr,
|
||||
&mem_ctx->shared_buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
|
||||
{
|
||||
amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
|
||||
&mem_ctx->shared_buf);
|
||||
}
|
||||
|
||||
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
|
||||
PSP_XGMI_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
|
||||
uint32_t ta_cmd_id,
|
||||
uint32_t session_id)
|
||||
|
@ -1020,9 +1034,7 @@ int psp_xgmi_terminate(struct psp_context *psp)
|
|||
psp->xgmi_context.context.initialized = false;
|
||||
|
||||
/* free xgmi shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->xgmi_context.context.mem_context.shared_bo,
|
||||
&psp->xgmi_context.context.mem_context.shared_mc_addr,
|
||||
&psp->xgmi_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1270,19 +1282,8 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
|
|||
// ras begin
|
||||
static int psp_ras_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for ras ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->ras_context.context.mem_context.shared_bo,
|
||||
&psp->ras_context.context.mem_context.shared_mc_addr,
|
||||
&psp->ras_context.context.mem_context.shared_buf);
|
||||
|
||||
return ret;
|
||||
return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
|
||||
PSP_RAS_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static int psp_ras_load(struct psp_context *psp)
|
||||
|
@ -1466,9 +1467,7 @@ static int psp_ras_terminate(struct psp_context *psp)
|
|||
psp->ras_context.context.initialized = false;
|
||||
|
||||
/* free ras shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->ras_context.context.mem_context.shared_bo,
|
||||
&psp->ras_context.context.mem_context.shared_mc_addr,
|
||||
&psp->ras_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1576,19 +1575,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
|
|||
// HDCP start
|
||||
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for hdcp ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->hdcp_context.context.mem_context.shared_bo,
|
||||
&psp->hdcp_context.context.mem_context.shared_mc_addr,
|
||||
&psp->hdcp_context.context.mem_context.shared_buf);
|
||||
|
||||
return ret;
|
||||
return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
|
||||
PSP_HDCP_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static int psp_hdcp_load(struct psp_context *psp)
|
||||
|
@ -1712,9 +1700,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
|
|||
|
||||
out:
|
||||
/* free hdcp shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->hdcp_context.context.mem_context.shared_bo,
|
||||
&psp->hdcp_context.context.mem_context.shared_mc_addr,
|
||||
&psp->hdcp_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1723,19 +1709,8 @@ out:
|
|||
// DTM start
|
||||
static int psp_dtm_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for dtm ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->dtm_context.context.mem_context.shared_bo,
|
||||
&psp->dtm_context.context.mem_context.shared_mc_addr,
|
||||
&psp->dtm_context.context.mem_context.shared_buf);
|
||||
|
||||
return ret;
|
||||
return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
|
||||
PSP_DTM_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static int psp_dtm_load(struct psp_context *psp)
|
||||
|
@ -1858,10 +1833,8 @@ static int psp_dtm_terminate(struct psp_context *psp)
|
|||
psp->dtm_context.context.initialized = false;
|
||||
|
||||
out:
|
||||
/* free hdcp shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->dtm_context.context.mem_context.shared_bo,
|
||||
&psp->dtm_context.context.mem_context.shared_mc_addr,
|
||||
&psp->dtm_context.context.mem_context.shared_buf);
|
||||
/* free dtm shared memory */
|
||||
psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1870,19 +1843,8 @@ out:
|
|||
// RAP start
|
||||
static int psp_rap_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for rap ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->rap_context.context.mem_context.shared_bo,
|
||||
&psp->rap_context.context.mem_context.shared_mc_addr,
|
||||
&psp->rap_context.context.mem_context.shared_buf);
|
||||
|
||||
return ret;
|
||||
return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context,
|
||||
PSP_RAP_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static int psp_rap_load(struct psp_context *psp)
|
||||
|
@ -1958,9 +1920,7 @@ static int psp_rap_initialize(struct psp_context *psp)
|
|||
if (ret || status != TA_RAP_STATUS__SUCCESS) {
|
||||
psp_rap_unload(psp);
|
||||
|
||||
amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
|
||||
&psp->rap_context.context.mem_context.shared_mc_addr,
|
||||
&psp->rap_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
|
||||
|
||||
psp->rap_context.context.initialized = false;
|
||||
|
||||
|
@ -1985,9 +1945,7 @@ static int psp_rap_terminate(struct psp_context *psp)
|
|||
psp->rap_context.context.initialized = false;
|
||||
|
||||
/* free rap shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->rap_context.context.mem_context.shared_bo,
|
||||
&psp->rap_context.context.mem_context.shared_mc_addr,
|
||||
&psp->rap_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -2030,19 +1988,9 @@ out_unlock:
|
|||
/* securedisplay start */
|
||||
static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for sa ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->securedisplay_context.context.mem_context.shared_bo,
|
||||
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
|
||||
&psp->securedisplay_context.context.mem_context.shared_buf);
|
||||
|
||||
return ret;
|
||||
return psp_ta_init_shared_buf(
|
||||
psp, &psp->securedisplay_context.context.mem_context,
|
||||
PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
|
||||
}
|
||||
|
||||
static int psp_securedisplay_load(struct psp_context *psp)
|
||||
|
@ -2120,9 +2068,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
|
|||
if (ret) {
|
||||
psp_securedisplay_unload(psp);
|
||||
|
||||
amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
|
||||
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
|
||||
&psp->securedisplay_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
|
||||
|
||||
psp->securedisplay_context.context.initialized = false;
|
||||
|
||||
|
@ -2159,9 +2105,7 @@ static int psp_securedisplay_terminate(struct psp_context *psp)
|
|||
psp->securedisplay_context.context.initialized = false;
|
||||
|
||||
/* free securedisplay shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->securedisplay_context.context.mem_context.shared_bo,
|
||||
&psp->securedisplay_context.context.mem_context.shared_mc_addr,
|
||||
&psp->securedisplay_context.context.mem_context.shared_buf);
|
||||
psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -49,6 +49,7 @@ enum amdgpu_ras_block {
|
|||
AMDGPU_RAS_BLOCK__MP0,
|
||||
AMDGPU_RAS_BLOCK__MP1,
|
||||
AMDGPU_RAS_BLOCK__FUSE,
|
||||
AMDGPU_RAS_BLOCK__MPIO,
|
||||
|
||||
AMDGPU_RAS_BLOCK__LAST
|
||||
};
|
||||
|
@ -309,6 +310,7 @@ struct ras_common_if {
|
|||
enum amdgpu_ras_block block;
|
||||
enum amdgpu_ras_error_type type;
|
||||
uint32_t sub_block_index;
|
||||
char name[32];
|
||||
};
|
||||
|
||||
struct amdgpu_ras {
|
||||
|
@ -419,7 +421,7 @@ struct ras_badpage {
|
|||
/* interfaces for IP */
|
||||
struct ras_fs_if {
|
||||
struct ras_common_if head;
|
||||
char sysfs_name[32];
|
||||
const char* sysfs_name;
|
||||
char debugfs_name[32];
|
||||
};
|
||||
|
||||
|
|
|
@ -326,7 +326,6 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
|
|||
{
|
||||
int i, j;
|
||||
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
drm_sched_entity_destroy(&adev->uvd.entity);
|
||||
|
||||
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
|
||||
|
|
|
@ -218,7 +218,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
|
|||
if (adev->vce.vcpu_bo == NULL)
|
||||
return 0;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
drm_sched_entity_destroy(&adev->vce.entity);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
|
||||
|
|
|
@ -258,8 +258,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
|
|||
{
|
||||
int i, j;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vcn.idle_work);
|
||||
|
||||
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
|
||||
if (adev->vcn.harvest_config & (1 << j))
|
||||
continue;
|
||||
|
|
|
@ -926,7 +926,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
|
|||
bp.size = amdgpu_vm_bo_size(adev, level);
|
||||
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
|
||||
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
|
||||
bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
|
||||
bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
|
||||
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
|
||||
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
|
||||
|
||||
|
@ -3345,12 +3345,13 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
|
|||
* @adev: amdgpu device pointer
|
||||
* @pasid: PASID of the VM
|
||||
* @addr: Address of the fault
|
||||
* @write_fault: true is write fault, false is read fault
|
||||
*
|
||||
* Try to gracefully handle a VM fault. Return true if the fault was handled and
|
||||
* shouldn't be reported any more.
|
||||
*/
|
||||
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
uint64_t addr)
|
||||
uint64_t addr, bool write_fault)
|
||||
{
|
||||
bool is_compute_context = false;
|
||||
struct amdgpu_bo *root;
|
||||
|
@ -3375,7 +3376,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
|||
addr /= AMDGPU_GPU_PAGE_SIZE;
|
||||
|
||||
if (is_compute_context &&
|
||||
!svm_range_restore_pages(adev, pasid, addr)) {
|
||||
!svm_range_restore_pages(adev, pasid, addr, write_fault)) {
|
||||
amdgpu_bo_unref(&root);
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -448,7 +448,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
|
|||
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
|
||||
struct amdgpu_task_info *task_info);
|
||||
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
|
||||
uint64_t addr);
|
||||
uint64_t addr, bool write_fault);
|
||||
|
||||
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
|
||||
|
||||
|
|
|
@ -32,6 +32,10 @@
|
|||
#include "wafl/wafl2_4_0_0_smn.h"
|
||||
#include "wafl/wafl2_4_0_0_sh_mask.h"
|
||||
|
||||
#define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210
|
||||
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
|
||||
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
|
||||
|
||||
static DEFINE_MUTEX(xgmi_mutex);
|
||||
|
||||
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
|
||||
|
@ -63,6 +67,33 @@ static const int wafl_pcs_err_status_reg_arct[] = {
|
|||
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
|
||||
};
|
||||
|
||||
static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
|
||||
smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
|
||||
};
|
||||
|
||||
static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
|
||||
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
|
||||
};
|
||||
|
||||
static const int walf_pcs_err_status_reg_aldebaran[] = {
|
||||
smnPCS_GOPX1_PCS_ERROR_STATUS,
|
||||
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
|
||||
};
|
||||
|
||||
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
|
||||
{"XGMI PCS DataLossErr",
|
||||
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
|
||||
|
@ -771,6 +802,17 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
|
|||
pcs_clear_status(adev,
|
||||
xgmi_pcs_err_status_reg_vg20[i]);
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
|
||||
pcs_clear_status(adev,
|
||||
walf_pcs_err_status_reg_aldebaran[i]);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -848,7 +890,6 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
|
|||
}
|
||||
break;
|
||||
case CHIP_VEGA20:
|
||||
default:
|
||||
/* check xgmi pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
|
||||
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
|
||||
|
@ -864,6 +905,32 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
|
|||
data, &ue_cnt, &ce_cnt, false);
|
||||
}
|
||||
break;
|
||||
case CHIP_ALDEBARAN:
|
||||
/* check xgmi23 pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, true);
|
||||
}
|
||||
/* check xgmi3x16 pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, true);
|
||||
}
|
||||
/* check wafl pcs error */
|
||||
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
|
||||
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
|
||||
if (data)
|
||||
amdgpu_xgmi_query_pcs_error_status(adev,
|
||||
data, &ue_cnt, &ce_cnt, false);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
dev_warn(adev->dev, "XGMI RAS error query not supported");
|
||||
break;
|
||||
}
|
||||
|
||||
adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
|
||||
|
|
|
@ -851,7 +851,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
|
|||
pll->reference_div = amdgpu_crtc->pll_reference_div;
|
||||
pll->post_div = amdgpu_crtc->pll_post_div;
|
||||
|
||||
amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
|
||||
amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock,
|
||||
&fb_div, &frac_fb_div, &ref_div, &post_div);
|
||||
|
||||
amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
|
||||
|
|
|
@ -93,6 +93,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
|
|||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
|
||||
struct amdgpu_task_info task_info;
|
||||
uint32_t status = 0;
|
||||
|
@ -121,7 +122,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
|
|||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
|
@ -55,6 +55,7 @@
|
|||
#include "umc_v6_0.h"
|
||||
#include "umc_v6_7.h"
|
||||
#include "hdp_v4_0.h"
|
||||
#include "mca_v3_0.h"
|
||||
|
||||
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
|
||||
|
||||
|
@ -506,6 +507,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
|||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
bool retry_fault = !!(entry->src_data[1] & 0x80);
|
||||
bool write_fault = !!(entry->src_data[1] & 0x20);
|
||||
uint32_t status = 0, cid = 0, rw = 0;
|
||||
struct amdgpu_task_info task_info;
|
||||
struct amdgpu_vmhub *hub;
|
||||
|
@ -536,7 +538,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
|||
/* Try to handle the recoverable page faults by filling page
|
||||
* tables
|
||||
*/
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
|
||||
if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -1229,6 +1231,18 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
|
|||
adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
|
||||
}
|
||||
|
||||
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_ALDEBARAN:
|
||||
if (!adev->gmc.xgmi.connected_to_cpu)
|
||||
adev->mca.funcs = &mca_v3_0_funcs;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int gmc_v9_0_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
@ -1250,6 +1264,7 @@ static int gmc_v9_0_early_init(void *handle)
|
|||
gmc_v9_0_set_mmhub_ras_funcs(adev);
|
||||
gmc_v9_0_set_gfxhub_funcs(adev);
|
||||
gmc_v9_0_set_hdp_ras_funcs(adev);
|
||||
gmc_v9_0_set_mca_funcs(adev);
|
||||
|
||||
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
|
||||
adev->gmc.shared_aperture_end =
|
||||
|
@ -1461,6 +1476,8 @@ static int gmc_v9_0_sw_init(void *handle)
|
|||
adev->gfxhub.funcs->init(adev);
|
||||
|
||||
adev->mmhub.funcs->init(adev);
|
||||
if (adev->mca.funcs)
|
||||
adev->mca.funcs->init(adev);
|
||||
|
||||
spin_lock_init(&adev->gmc.invalidate_lock);
|
||||
|
||||
|
|
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include "amdgpu_ras.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_mca.h"
|
||||
|
||||
#define smnMCMP0_STATUST0 0x03830408
|
||||
#define smnMCMP1_STATUST0 0x03b30408
|
||||
#define smnMCMPIO_STATUST0 0x0c930408
|
||||
|
||||
|
||||
static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMP0_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mp0_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mp0_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MP0,
|
||||
.sysfs_name = "mp0_err_count",
|
||||
};
|
||||
|
||||
static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMP1_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mp1_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mp1_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MP1,
|
||||
.sysfs_name = "mp1_err_count",
|
||||
};
|
||||
|
||||
static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status)
|
||||
{
|
||||
amdgpu_mca_query_ras_error_count(adev,
|
||||
smnMCMPIO_STATUST0,
|
||||
ras_error_status);
|
||||
}
|
||||
|
||||
static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
|
||||
{
|
||||
return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
|
||||
}
|
||||
|
||||
static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
|
||||
.ras_late_init = mca_v3_0_mpio_ras_late_init,
|
||||
.ras_fini = mca_v3_0_mpio_ras_fini,
|
||||
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
|
||||
.query_ras_error_address = NULL,
|
||||
.ras_block = AMDGPU_RAS_BLOCK__MPIO,
|
||||
.sysfs_name = "mpio_err_count",
|
||||
};
|
||||
|
||||
|
||||
static void mca_v3_0_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_mca *mca = &adev->mca;
|
||||
|
||||
mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
|
||||
mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
|
||||
mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_mca_funcs mca_v3_0_funcs = {
|
||||
.init = mca_v3_0_init,
|
||||
};
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
* Copyright (C) 2021 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
|
||||
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef __MCA_V3_0_H__
|
||||
#define __MCA_V3_0_H__
|
||||
|
||||
extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
|
||||
|
||||
#endif
|
|
@ -85,6 +85,11 @@
|
|||
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
|
||||
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
|
||||
|
||||
#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x3878
|
||||
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
|
||||
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
|
||||
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
|
||||
|
||||
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
|
||||
void *ras_error_status);
|
||||
|
||||
|
@ -346,14 +351,21 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
|
|||
struct ras_err_data err_data = {0, 0, 0, NULL};
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
|
||||
else
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
|
||||
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
|
||||
/* driver has to clear the interrupt status when bif ring is disabled */
|
||||
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL,
|
||||
RAS_CNTLR_INTERRUPT_CLEAR, 1);
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
|
||||
else
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
if (!ras->disable_ras_err_cnt_harvest) {
|
||||
/*
|
||||
|
@ -395,14 +407,22 @@ static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_d
|
|||
{
|
||||
uint32_t bif_doorbell_intr_cntl;
|
||||
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
|
||||
else
|
||||
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
|
||||
|
||||
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
|
||||
/* driver has to clear the interrupt status when bif ring is disabled */
|
||||
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
|
||||
BIF_DOORBELL_INT_CNTL,
|
||||
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
|
||||
else
|
||||
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
|
||||
|
||||
amdgpu_ras_global_ras_isr(adev);
|
||||
}
|
||||
|
@ -572,7 +592,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
|
|||
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
|
||||
bool enable)
|
||||
{
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
|
||||
if (adev->asic_type == CHIP_ALDEBARAN)
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
|
||||
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
|
||||
else
|
||||
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
|
||||
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
|
||||
}
|
||||
|
||||
|
|
|
@ -1353,8 +1353,6 @@ static int soc15_common_early_init(void *handle)
|
|||
adev->asic_funcs = &vega20_asic_funcs;
|
||||
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
|
||||
AMD_CG_SUPPORT_GFX_MGLS |
|
||||
AMD_CG_SUPPORT_GFX_CGCG |
|
||||
AMD_CG_SUPPORT_GFX_CGLS |
|
||||
AMD_CG_SUPPORT_GFX_CP_LS |
|
||||
AMD_CG_SUPPORT_HDP_LS |
|
||||
AMD_CG_SUPPORT_SDMA_MGCG |
|
||||
|
|
|
@ -698,6 +698,30 @@ static int uvd_v3_1_hw_fini(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
/* shutdown the UVD block */
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v3_1_stop(adev);
|
||||
|
||||
|
|
|
@ -212,6 +212,30 @@ static int uvd_v4_2_hw_fini(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
/* shutdown the UVD block */
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v4_2_stop(adev);
|
||||
|
||||
|
|
|
@ -210,6 +210,30 @@ static int uvd_v5_0_hw_fini(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
/* shutdown the UVD block */
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v5_0_stop(adev);
|
||||
|
||||
|
@ -224,7 +248,6 @@ static int uvd_v5_0_suspend(void *handle)
|
|||
r = uvd_v5_0_hw_fini(adev);
|
||||
if (r)
|
||||
return r;
|
||||
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
|
||||
|
||||
return amdgpu_uvd_suspend(adev);
|
||||
}
|
||||
|
|
|
@ -543,6 +543,30 @@ static int uvd_v6_0_hw_fini(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
/* shutdown the UVD block */
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
if (RREG32(mmUVD_STATUS) != 0)
|
||||
uvd_v6_0_stop(adev);
|
||||
|
||||
|
|
|
@ -606,6 +606,30 @@ static int uvd_v7_0_hw_fini(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
/* shutdown the UVD block */
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
uvd_v7_0_stop(adev);
|
||||
else {
|
||||
|
|
|
@ -477,6 +477,31 @@ static int vce_v2_0_hw_init(void *handle)
|
|||
|
||||
static int vce_v2_0_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_vce(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_vce_clocks(adev, 0, 0);
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -490,6 +490,29 @@ static int vce_v3_0_hw_fini(void *handle)
|
|||
int r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/*
|
||||
* Proper cleanups before halting the HW engine:
|
||||
* - cancel the delayed idle work
|
||||
* - enable powergating
|
||||
* - enable clockgating
|
||||
* - disable dpm
|
||||
*
|
||||
* TODO: to align with the VCN implementation, move the
|
||||
* jobs for clockgating/powergating/dpm setting to
|
||||
* ->set_powergating_state().
|
||||
*/
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_vce(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_vce_clocks(adev, 0, 0);
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_PG_STATE_GATE);
|
||||
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_CG_STATE_GATE);
|
||||
}
|
||||
|
||||
r = vce_v3_0_wait_for_idle(handle);
|
||||
if (r)
|
||||
return r;
|
||||
|
|
|
@@ -542,6 +542,29 @@ static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->vce.idle_work);

if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}

if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
vce_v4_0_stop(adev);

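The same teardown ordering (cancel the delayed idle work, then either let DPM power the engine down or gate power and clocks by hand) is repeated verbatim in each of the UVD/VCE hw_fini hunks above. As an illustration only, not something this series adds, the sequence could be factored into a helper along the following lines; the helper name and the callback parameters are hypothetical.

/* Hypothetical helper, for illustration only: mirrors the teardown order
 * used by the uvd/vce hw_fini hunks above (names are not from the patch).
 */
static void amdgpu_hwfini_gate_block(struct amdgpu_device *adev,
				     struct delayed_work *idle_work,
				     enum amd_ip_block_type block,
				     void (*enable_dpm)(struct amdgpu_device *adev, bool enable),
				     void (*drop_clocks)(struct amdgpu_device *adev))
{
	/* Stop the deferred idle handler before touching the HW state. */
	cancel_delayed_work_sync(idle_work);

	if (adev->pm.dpm_enabled) {
		/* DPM owns the block: ask it to power the engine down. */
		enable_dpm(adev, false);
	} else {
		/* No DPM: drop the clocks, then gate power and clock. */
		drop_clocks(adev);
		amdgpu_device_ip_set_powergating_state(adev, block,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, block,
						       AMD_CG_STATE_GATE);
	}
}

The TODO in the hunks points the other way, toward the VCN approach of doing this work from ->set_powergating_state(), so a helper like this would only be an interim cleanup.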
@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t *se_mask)
{
struct kfd_cu_info cu_info;
uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
int i, se, sh, cu = 0;

uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
int i, se, sh, cu;
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);

if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;

/* Exceeding these bounds corrupts the stack and indicates a coding error.
* Returning with no CU's enabled will hang the queue, which should be
* attention grabbing.
*/
if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
return;
}
if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
return;
}
/* Count active CUs per SH.
*
* Some CUs in an SH may be disabled. HW expects disabled CUs to be
* represented in the high bits of each SH's enable mask (the upper and lower
* 16 bits of se_mask) and will take care of the actual distribution of
* disabled CUs within each SH automatically.
* Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
*
* See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
*/
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);

/* Symmetrically map cu_mask to all SEs:
* cu_mask[0] bit0 -> se_mask[0] bit0;
* cu_mask[0] bit1 -> se_mask[1] bit0;
* ... (if # SE is 4)
* cu_mask[0] bit4 -> se_mask[0] bit1;
/* Symmetrically map cu_mask to all SEs & SHs:
* se_mask programs up to 2 SH in the upper and lower 16 bits.
*
* Examples
* Assuming 1 SH/SE, 4 SEs:
* cu_mask[0] bit0 -> se_mask[0] bit0
* cu_mask[0] bit1 -> se_mask[1] bit0
* ...
* cu_mask[0] bit4 -> se_mask[0] bit1
* ...
*
* Assuming 2 SH/SE, 4 SEs
* cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
* cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
* ...
* cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
* cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
* ...
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
*
* First ensure all CUs are disabled, then enable user specified CUs.
*/
se = 0;
for (i = 0; i < cu_mask_count; i++) {
if (cu_mask[i / 32] & (1 << (i % 32)))
se_mask[se] |= 1 << cu;
for (i = 0; i < cu_info.num_shader_engines; i++)
se_mask[i] = 0;

do {
se++;
if (se == cu_info.num_shader_engines) {
se = 0;
cu++;
i = 0;
for (cu = 0; cu < 16; cu++) {
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
for (se = 0; se < cu_info.num_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (1 << (i % 32)))
se_mask[se] |= 1 << (cu + sh * 16);
i++;
if (i == cu_mask_count)
return;
}
}
} while (cu >= cu_per_se[se] && cu < 32);
}
}
}

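The new mapping loop walks the CU index first, then the shader array, then the shader engine, so consecutive cu_mask bits land on different engines, and CUs in the second shader array are programmed through the upper 16 bits of each se_mask word. Below is a small self-contained userspace sketch (not kernel code) that mirrors that loop structure; the cu_per_sh contents and the requested CU count are invented for the demo.

/* Standalone demo of the per-SE/SH cu_mask distribution used above.
 * The cu_per_sh[][] contents are made-up example data, not values read
 * from any real chip.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_SE 4
#define NUM_SH 2

int main(void)
{
	/* Active CUs per shader array (example data). */
	int cu_per_sh[NUM_SE][NUM_SH] = {
		{ 9, 9 }, { 9, 9 }, { 10, 9 }, { 9, 10 },
	};
	/* Request the first 16 CUs (bits 0..15 of cu_mask[0]). */
	uint32_t cu_mask[1] = { 0xffff };
	uint32_t cu_mask_count = 16;
	uint32_t se_mask[NUM_SE] = { 0 };
	uint32_t i = 0;

	/* CU index outermost, then SH, then SE: consecutive cu_mask bits
	 * spread across engines, SH1 CUs go into the upper 16 bits.
	 */
	for (int cu = 0; cu < 16 && i < cu_mask_count; cu++)
		for (int sh = 0; sh < NUM_SH && i < cu_mask_count; sh++)
			for (int se = 0; se < NUM_SE && i < cu_mask_count; se++)
				if (cu_per_sh[se][sh] > cu) {
					if (cu_mask[i / 32] & (1u << (i % 32)))
						se_mask[se] |= 1u << (cu + sh * 16);
					i++;
				}

	for (int se = 0; se < NUM_SE; se++)
		printf("se_mask[%d] = 0x%08x\n", se, se_mask[se]);
	return 0;
}

With these example inputs every se_mask ends up as 0x00030003: CU0 and CU1 enabled in both shader arrays of every engine, which is the symmetric spread the comment block describes.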
@@ -27,6 +27,7 @@
#include "kfd_priv.h"

#define KFD_MAX_NUM_SE 8
#define KFD_MAX_NUM_SH_PER_SE 2

/**
* struct mqd_manager

@@ -120,6 +120,7 @@ static void svm_range_remove_notifier(struct svm_range *prange)

static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
@@ -136,7 +137,8 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
prange->dma_addr[gpuidx] = addr;
}

for (i = 0; i < prange->npages; i++) {
addr += offset;
for (i = 0; i < npages; i++) {
if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
"leaking dma mapping\n"))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
@@ -167,6 +169,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns)
{
struct kfd_process *p;
@@ -187,7 +190,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
}
adev = (struct amdgpu_device *)pdd->dev->kgd;

r = svm_range_dma_map_dev(adev, prange, hmm_pfns, gpuidx);
r = svm_range_dma_map_dev(adev, prange, offset, npages,
hmm_pfns, gpuidx);
if (r)
break;
}
@@ -1088,11 +1092,6 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;

pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);

pr_debug("svms 0x%p [0x%lx 0x%lx] vram %d PTE 0x%llx mapping 0x%x\n",
prange->svms, prange->start, prange->last,
(domain == SVM_RANGE_VRAM_DOMAIN) ? 1:0, pte_flags, mapping_flags);

return pte_flags;
}

@@ -1156,7 +1155,8 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,

static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct svm_range *prange, dma_addr_t *dma_addr,
struct svm_range *prange, unsigned long offset,
unsigned long npages, bool readonly, dma_addr_t *dma_addr,
struct amdgpu_device *bo_adev, struct dma_fence **fence)
{
struct amdgpu_bo_va bo_va;
@@ -1167,14 +1167,15 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r = 0;
int64_t i;

pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
last_start = prange->start + offset;

pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
last_start, last_start + npages - 1, readonly);

if (prange->svm_bo && prange->ttm_res)
bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);

last_start = prange->start;
for (i = 0; i < prange->npages; i++) {
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
if ((prange->start + i) < prange->last &&
@@ -1183,13 +1184,21 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,

pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");

pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
last_start,
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;

pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
prange->svms, last_start, prange->start + i,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);

r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
NULL, last_start,
prange->start + i, pte_flags,
last_start - prange->start,
NULL,
dma_addr,
NULL, dma_addr,
&vm->last_update,
&table_freed);
if (r) {
@@ -1220,8 +1229,10 @@ out:
return r;
}

static int svm_range_map_to_gpus(struct svm_range *prange,
unsigned long *bitmap, bool wait)
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long npages, bool readonly,
unsigned long *bitmap, bool wait)
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
@@ -1257,7 +1268,8 @@ static int svm_range_map_to_gpus(struct svm_range *prange,
}

r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
prange, prange->dma_addr[gpuidx],
prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL);
if (r)
break;
@@ -1390,7 +1402,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
int32_t gpuidx, bool intr, bool wait)
{
struct svm_validate_context ctx;
struct hmm_range *hmm_range;
unsigned long start, end, addr;
struct kfd_process *p;
void *owner;
int32_t idx;
@@ -1448,40 +1460,66 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
break;
}
}
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
prange->start << PAGE_SHIFT,
prange->npages, &hmm_range,
false, true, owner);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
goto unreserve_out;
}

r = svm_range_dma_map(prange, ctx.bitmap,
hmm_range->hmm_pfns);
if (r) {
pr_debug("failed %d to dma map range\n", r);
goto unreserve_out;
}
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; addr < end && !r; ) {
struct hmm_range *hmm_range;
struct vm_area_struct *vma;
unsigned long next;
unsigned long offset;
unsigned long npages;
bool readonly;

prange->validated_once = true;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start) {
r = -EFAULT;
goto unreserve_out;
}
readonly = !(vma->vm_flags & VM_WRITE);

svm_range_lock(prange);
if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
goto unlock_out;
}
if (!list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
goto unlock_out;
}
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
addr, npages, &hmm_range,
readonly, true, owner);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
goto unreserve_out;
}

r = svm_range_map_to_gpus(prange, ctx.bitmap, wait);
offset = (addr - start) >> PAGE_SHIFT;
r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r) {
pr_debug("failed %d to dma map range\n", r);
goto unreserve_out;
}

svm_range_lock(prange);
if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
goto unlock_out;
}
if (!list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
goto unlock_out;
}

r = svm_range_map_to_gpus(prange, offset, npages, readonly,
ctx.bitmap, wait);

unlock_out:
svm_range_unlock(prange);
svm_range_unlock(prange);

addr = next;
}

if (addr == end)
prange->validated_once = true;

unreserve_out:
svm_range_unreserve_bos(&ctx);

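The restructured svm_range_validate_and_map() above no longer faults the whole range in one amdgpu_hmm_range_get_pages() call: it walks the range one VMA at a time, derives a per-chunk page offset and page count, and picks up a read-only flag from the VMA so the GPU mapping cannot be more permissive than the CPU one. The following is a minimal userspace sketch of just that splitting logic; the fake VMA table, addresses, and helper are invented for the example, only the chunking math mirrors the patch.

/* Userspace sketch of the per-VMA splitting done in the hunk above.
 * The "VMA" table and addresses are invented; only the chunking math
 * (next = min(vma_end, end), offset/npages per chunk) mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL

struct fake_vma {
	unsigned long start, end;	/* byte addresses, page aligned */
	bool writable;
};

static const struct fake_vma *find_fake_vma(const struct fake_vma *v,
					    int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr >= v[i].start && addr < v[i].end)
			return &v[i];
	return NULL;
}

int main(void)
{
	const struct fake_vma vmas[] = {
		{ 0x100000, 0x180000, true },
		{ 0x180000, 0x200000, false },
	};
	unsigned long start = 0x100000, end = 0x200000;

	for (unsigned long addr = start; addr < end; ) {
		const struct fake_vma *vma = find_fake_vma(vmas, 2, addr);

		if (!vma) {	/* hole in the address space: bail out */
			fprintf(stderr, "no VMA at 0x%lx\n", addr);
			return 1;
		}
		unsigned long next = vma->end < end ? vma->end : end;
		unsigned long offset = (addr - start) >> PAGE_SHIFT;
		unsigned long npages = (next - addr) >> PAGE_SHIFT;

		printf("chunk: offset %lu, %lu pages, readonly=%d\n",
		       offset, npages, !vma->writable);
		addr = next;
	}
	return 0;
}

Each chunk then corresponds to one get_pages/dma_map/map_to_gpus pass in the kernel loop, with validated_once only set once the walk reaches the end of the range.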
@@ -2400,9 +2438,29 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
WRITE_ONCE(pdd->faults, pdd->faults + 1);
}

static bool
svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
{
unsigned long requested = VM_READ;
struct vm_area_struct *vma;

if (write_fault)
requested |= VM_WRITE;

vma = find_vma(mm, addr << PAGE_SHIFT);
if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
pr_debug("address 0x%llx VMA is removed\n", addr);
return true;
}

pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
}

int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint64_t addr)
uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
@@ -2484,6 +2542,13 @@ retry_write_locked:
goto out_unlock_range;
}

if (!svm_fault_allowed(mm, addr, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
goto out_unlock_range;
}

best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
if (best_loc == -1) {
pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",

@@ -175,7 +175,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev,
unsigned int pasid, uint64_t addr);
unsigned int pasid, uint64_t addr, bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
struct svm_range *prange, struct mm_struct *mm,
@@ -209,7 +209,8 @@ static inline void svm_range_list_fini(struct kfd_process *p)
}

static inline int svm_range_restore_pages(struct amdgpu_device *adev,
unsigned int pasid, uint64_t addr)
unsigned int pasid, uint64_t addr,
bool write_fault)
{
return -EFAULT;
}

@@ -522,16 +522,21 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_888:
REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
DC_LOG_DEBUG("HDMI source set to 24BPP deep color depth\n");
break;
case COLOR_DEPTH_101010:
if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 0);
DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
"disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 1,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source 30BPP deep color depth" \
"enabled for YCBCR422 non-pixel encoding\n");
}
break;
case COLOR_DEPTH_121212:
@@ -539,16 +544,22 @@ void enc1_stream_encoder_hdmi_set_stream_attribute(
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 0);
DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
"disabled for YCBCR422 pixel encoding\n");
} else {
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 2,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source 36BPP deep color depth" \
"enabled for non-pixel YCBCR422 encoding\n");
}
break;
case COLOR_DEPTH_161616:
REG_UPDATE_2(HDMI_CONTROL,
HDMI_DEEP_COLOR_DEPTH, 3,
HDMI_DEEP_COLOR_ENABLE, 1);
DC_LOG_DEBUG("HDMI source deep color depth enabled in" \
"reserved mode\n");
break;
default:
break;

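The switch above programs HDMI_DEEP_COLOR_DEPTH per pixel depth and only asserts HDMI_DEEP_COLOR_ENABLE when the encoding can actually carry the extra bits (YCbCr 4:2:2 keeps deep color disabled). A tiny standalone sketch of that decision, for illustration only; the enum, struct, and function here are local to the example and are not DC types, only the 0..3 depth-field encoding follows the hunk.

/* Illustration only: field encodings follow the switch above
 * (0 = 24bpp, 1 = 30bpp, 2 = 36bpp, 3 = 48bpp); the types here are
 * local to the example, not DC types.
 */
#include <stdbool.h>
#include <stdio.h>

enum color_depth { DEPTH_888, DEPTH_101010, DEPTH_121212, DEPTH_161616 };

struct hdmi_deep_color {
	int depth_field;	/* HDMI_DEEP_COLOR_DEPTH */
	bool enable;		/* HDMI_DEEP_COLOR_ENABLE */
};

static struct hdmi_deep_color pick_deep_color(enum color_depth d, bool ycbcr422)
{
	switch (d) {
	case DEPTH_888:
		return (struct hdmi_deep_color){ 0, false };
	case DEPTH_101010:
		return (struct hdmi_deep_color){ 1, !ycbcr422 };
	case DEPTH_121212:
		return (struct hdmi_deep_color){ 2, !ycbcr422 };
	case DEPTH_161616:
		return (struct hdmi_deep_color){ 3, true };
	}
	return (struct hdmi_deep_color){ 0, false };
}

int main(void)
{
	struct hdmi_deep_color c = pick_deep_color(DEPTH_101010, true);

	printf("depth field %d, deep color %s\n",
	       c.depth_field, c.enable ? "enabled" : "disabled");
	return 0;
}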
@@ -92,7 +92,7 @@
#define DC_LOGGER_INIT(logger)

struct _vcs_dpi_ip_params_st dcn3_0_ip = {
.use_min_dcfclk = 1,
.use_min_dcfclk = 0,
.clamp_min_dcfclk = 0,
.odm_capable = 1,
.gpuvm_enable = 0,
@@ -2398,16 +2398,37 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;

if (bw_params->clk_table.entries[0].memclk_mhz) {
int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;

if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
}

if (!max_dcfclk_mhz)
max_dcfclk_mhz = dcn3_0_soc.clock_limits[0].dcfclk_mhz;
if (!max_dispclk_mhz)
max_dispclk_mhz = dcn3_0_soc.clock_limits[0].dispclk_mhz;
if (!max_dppclk_mhz)
max_dppclk_mhz = dcn3_0_soc.clock_limits[0].dppclk_mhz;
if (!max_phyclk_mhz)
max_phyclk_mhz = dcn3_0_soc.clock_limits[0].phyclk_mhz;

if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
num_dcfclk_sta_targets++;
} else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
for (i = 0; i < num_dcfclk_sta_targets; i++) {
if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
dcfclk_sta_targets[i] = max_dcfclk_mhz;
break;
}
}
@@ -2447,7 +2468,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
@@ -2462,11 +2483,12 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}

while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
}

dcn3_0_soc.num_states = num_states;
for (i = 0; i < dcn3_0_soc.num_states; i++) {
dcn3_0_soc.clock_limits[i].state = i;
dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
@@ -2474,9 +2496,9 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];

/* Fill all states with max values of all other clocks */
dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
dcn3_0_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_0_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_0_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
@@ -2489,11 +2511,6 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}

/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
if (dc->current_state)
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}

static const struct resource_funcs dcn30_res_pool_funcs = {

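The bounding-box update above stops assuming that entry [1] of the SMU clock table is the top DPM level: it scans every level for the maximum DCFCLK/DISPCLK/DPPCLK/PHYCLK and falls back to the SoC defaults for any clock the table never reports. A standalone sketch of just that scan-and-fallback step; the table contents and default values below are invented for the example.

/* Standalone sketch of the "scan all DPM levels, fall back to defaults"
 * step from the bounding-box hunk above; the table contents and the
 * default values are invented for the example.
 */
#include <stdio.h>

#define MAX_NUM_DPM_LVL 8

struct clk_entry {
	unsigned int dcfclk_mhz, dispclk_mhz, dppclk_mhz, phyclk_mhz;
};

int main(void)
{
	/* Example SMU clock table: some levels report 0 for some clocks. */
	struct clk_entry table[MAX_NUM_DPM_LVL] = {
		{ 506,  600,  600, 0 },
		{ 675,  800,  800, 0 },
		{ 1100, 1300, 1300, 0 },
	};
	unsigned int default_dcfclk = 1200, default_dispclk = 1400,
		     default_dppclk = 1400, default_phyclk = 810;
	unsigned int max_dcfclk = 0, max_dispclk = 0, max_dppclk = 0, max_phyclk = 0;

	for (int i = 0; i < MAX_NUM_DPM_LVL; i++) {
		if (table[i].dcfclk_mhz > max_dcfclk)
			max_dcfclk = table[i].dcfclk_mhz;
		if (table[i].dispclk_mhz > max_dispclk)
			max_dispclk = table[i].dispclk_mhz;
		if (table[i].dppclk_mhz > max_dppclk)
			max_dppclk = table[i].dppclk_mhz;
		if (table[i].phyclk_mhz > max_phyclk)
			max_phyclk = table[i].phyclk_mhz;
	}
	/* Any clock the table never reported falls back to the SoC default. */
	if (!max_dcfclk)
		max_dcfclk = default_dcfclk;
	if (!max_dispclk)
		max_dispclk = default_dispclk;
	if (!max_dppclk)
		max_dppclk = default_dppclk;
	if (!max_phyclk)
		max_phyclk = default_phyclk;

	printf("dcfclk %u dispclk %u dppclk %u phyclk %u\n",
	       max_dcfclk, max_dispclk, max_dppclk, max_phyclk);
	return 0;
}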
|
@ -610,20 +610,3 @@ bool dcn31_is_abm_supported(struct dc *dc,
|
|||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void apply_riommu_invalidation_wa(struct dc *dc)
|
||||
{
|
||||
struct dce_hwseq *hws = dc->hwseq;
|
||||
|
||||
if (!hws->wa.early_riommu_invalidation)
|
||||
return;
|
||||
|
||||
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
|
||||
}
|
||||
|
||||
void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
dcn10_init_pipes(dc, context);
|
||||
apply_riommu_invalidation_wa(dc);
|
||||
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
|
|||
};
|
||||
|
||||
static const struct hwseq_private_funcs dcn31_private_funcs = {
|
||||
.init_pipes = dcn31_init_pipes,
|
||||
.init_pipes = dcn10_init_pipes,
|
||||
.update_plane_addr = dcn20_update_plane_addr,
|
||||
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
|
||||
.update_mpcc = dcn20_update_mpcc,
|
||||
|
|
|
@ -1302,7 +1302,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
|
|||
hws->regs = &hwseq_reg;
|
||||
hws->shifts = &hwseq_shift;
|
||||
hws->masks = &hwseq_mask;
|
||||
hws->wa.early_riommu_invalidation = true;
|
||||
}
|
||||
return hws;
|
||||
}
|
||||
|
|
|
@@ -3644,8 +3644,7 @@ static double TruncToValidBPP(
void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode = 0;
int MaxPrefetchMode = 2;
int MinPrefetchMode, MaxPrefetchMode;
int i;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
@@ -3657,6 +3656,10 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)

/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/

CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);

/*Scale Ratio, taps Support Check*/

v->ScaleRatioAndTapsSupport = true;
@@ -244,6 +244,8 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank =
soc->allow_dram_self_refresh_or_dram_clock_change_in_vblank;

mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
@@ -733,8 +735,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OverrideHostVMPageTableLevels;
}

mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank = dm_try_to_allow_self_refresh_and_mclk_switch;

if (mode_lib->vba.OverrideGPUVMPageTableLevels)
mode_lib->vba.GPUVMMaxPageTableLevels = mode_lib->vba.OverrideGPUVMPageTableLevels;

@@ -41,7 +41,6 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
bool early_riommu_invalidation;
};

struct hwseq_wa_state {

@@ -785,7 +785,7 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
__free_page(rdev->dummy_page.page);
@@ -808,8 +808,8 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
return;
pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
}

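The radeon hunk above swaps the deprecated pci_map_page()/pci_unmap_page() wrappers and PCI_DMA_BIDIRECTIONAL for the generic DMA API on &pdev->dev. A kernel-style fragment showing the pairing, for illustration only; the helper names are made up and the device/page are assumed to come from the surrounding driver context.

/* Kernel-style fragment (illustration only) of the dma_map_page()
 * pairing the radeon hunk switches to; "dev" and "page" come from the
 * surrounding driver context, the helper names are hypothetical.
 */
#include <linux/dma-mapping.h>

static dma_addr_t map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	/* Map the whole page for bidirectional DMA via the generic DMA API. */
	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return 0;	/* this sketch treats 0 as "mapping failed" */
	return addr;
}

static void unmap_one_page(struct device *dev, dma_addr_t addr)
{
	/* Size and direction must match the original dma_map_page() call. */
	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}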