Merge branch 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux into drm-next
More features for 4.14. Nothing too major here. I have a few more
additional patches for large page support in vega10 among other things,
but they require some reservation object patches from drm-misc-next, so
I'll send that request once you've pulled the latest drm-misc-next.

Highlights:
- Fixes for ACP audio on stoney
- SR-IOV fixes for vega10
- various powerplay fixes
- lots of code clean up

* 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux: (62 commits)
  drm/amdgpu/gfx7: fix function name
  drm/amd/amdgpu: Disabling Power Gating for Stoney platform
  drm/amd/amdgpu: Added a quirk for Stoney platform
  drm/amdgpu: jt_size was wrongly counted twice
  drm/amdgpu: fix missing endian-safe guard
  drm/amdgpu: ignore digest_size when loading sdma fw for raven
  drm/amdgpu: Uninitialized variable in amdgpu_ttm_backend_bind()
  drm/amd/powerplay: fix coding style in hwmgr.c
  drm/amd/powerplay: refine dmesg info under powerplay.
  drm/amdgpu: don't finish the ring if not initialized
  drm/radeon: Fix preferred typo
  drm/amdgpu: Fix preferred typo
  drm/radeon: Fix stolen typo
  drm/amdgpu: Fix stolen typo
  drm/amd/powerplay: fix coccinelle warnings in vega10_hwmgr.c
  drm/amdgpu: set gfx_v9_0_ip_funcs as static
  drm/radeon: switch to drm_*{get,put} helpers
  drm/amdgpu: switch to drm_*{get,put} helpers
  drm/amd/powerplay: add CZ profile support
  drm/amd/powerplay: fix PSI not enabled by kmd
  ...
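Many hunks below simply thread a new trailing init_value argument through
amdgpu_bo_create() and its callers. As a rough illustration of the updated
call (this helper is only a sketch for this note, not part of the patch,
and its name is made up; the comment about AMDGPU_GEM_CREATE_VRAM_CLEARED
comes from the new kernel-doc in the diff):

    /* Illustrative only: allocate a VRAM BO whose contents are cleared to 0. */
    static int example_alloc_cleared_vram_bo(struct amdgpu_device *adev,
                                             unsigned long size,
                                             struct amdgpu_bo **bo)
    {
        /* The 0 before bo is the new init_value parameter; per the diff it
         * only takes effect when AMDGPU_GEM_CREATE_VRAM_CLEARED is set. */
        return amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                AMDGPU_GEM_CREATE_VRAM_CLEARED,
                                NULL, NULL, 0, bo);
    }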
commit 2040c47361
@@ -373,78 +373,10 @@ struct amdgpu_clock {
};

/*
 * BO.
 * GEM.
 */
struct amdgpu_bo_list_entry {
    struct amdgpu_bo *robj;
    struct ttm_validate_buffer tv;
    struct amdgpu_bo_va *bo_va;
    uint32_t priority;
    struct page **user_pages;
    int user_invalidated;
};

struct amdgpu_bo_va_mapping {
    struct list_head list;
    struct rb_node rb;
    uint64_t start;
    uint64_t last;
    uint64_t __subtree_last;
    uint64_t offset;
    uint64_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
    /* protected by bo being reserved */
    struct list_head bo_list;
    struct dma_fence *last_pt_update;
    unsigned ref_count;

    /* protected by vm mutex and spinlock */
    struct list_head vm_status;

    /* mappings for this bo_va */
    struct list_head invalids;
    struct list_head valids;

    /* constant after initialization */
    struct amdgpu_vm *vm;
    struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
    /* Protected by tbo.reserved */
    u32 prefered_domains;
    u32 allowed_domains;
    struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
    struct ttm_placement placement;
    struct ttm_buffer_object tbo;
    struct ttm_bo_kmap_obj kmap;
    u64 flags;
    unsigned pin_count;
    void *kptr;
    u64 tiling_flags;
    u64 metadata_flags;
    void *metadata;
    u32 metadata_size;
    unsigned prime_shared_count;
    /* list of all virtual address to which this bo
     * is associated to
     */
    struct list_head va;
    /* Constant after initialization */
    struct drm_gem_object gem_base;
    struct amdgpu_bo *parent;
    struct amdgpu_bo *shadow;

    struct ttm_bo_kmap_obj dma_buf_vmap;
    struct amdgpu_mn *mn;
    struct list_head mn_list;
    struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
@@ -678,15 +610,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
    /* overlap the doorbell assignment with VCN as they are mutually exclusive
     * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
     */
    AMDGPU_DOORBELL64_RING0_1 = 0xF8,
    AMDGPU_DOORBELL64_RING2_3 = 0xF9,
    AMDGPU_DOORBELL64_RING4_5 = 0xFA,
    AMDGPU_DOORBELL64_RING6_7 = 0xFB,
    AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
    AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
    AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
    AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,

    AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
    AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
    AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
    AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
    AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
    AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
    AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
    AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,

    AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
    AMDGPU_DOORBELL64_INVALID = 0xFFFF
@@ -825,6 +757,14 @@ struct amdgpu_fpriv {
/*
 * residency list
 */
struct amdgpu_bo_list_entry {
    struct amdgpu_bo *robj;
    struct ttm_validate_buffer tv;
    struct amdgpu_bo_va *bo_va;
    uint32_t priority;
    struct page **user_pages;
    int user_invalidated;
};

struct amdgpu_bo_list {
    struct mutex lock;
@@ -1191,10 +1131,6 @@ struct amdgpu_wb {

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb);

void amdgpu_get_pcie_info(struct amdgpu_device *adev);
@@ -1488,7 +1424,7 @@ struct amdgpu_device {
    bool is_atom_fw;
    uint8_t *bios;
    uint32_t bios_size;
    struct amdgpu_bo *stollen_vga_memory;
    struct amdgpu_bo *stolen_vga_memory;
    uint32_t bios_scratch_reg_offset;
    uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -285,19 +285,20 @@ static int acp_hw_init(void *handle)
        return 0;
    else if (r)
        return r;
    if (adev->asic_type != CHIP_STONEY) {
        adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
        if (adev->acp.acp_genpd == NULL)
            return -ENOMEM;

    adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
    if (adev->acp.acp_genpd == NULL)
        return -ENOMEM;

        adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
        adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
        adev->acp.acp_genpd->gpd.power_on = acp_poweron;
    adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
    adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
    adev->acp.acp_genpd->gpd.power_on = acp_poweron;

        adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
    adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

        pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
    pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
    }

    adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
                                 GFP_KERNEL);
@@ -319,14 +320,29 @@ static int acp_hw_init(void *handle)
        return -ENOMEM;
    }

    i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
    switch (adev->asic_type) {
    case CHIP_STONEY:
        i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
            DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
        break;
    default:
        i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
    }
    i2s_pdata[0].cap = DWC_I2S_PLAY;
    i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
    i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
    i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
    switch (adev->asic_type) {
    case CHIP_STONEY:
        i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
            DW_I2S_QUIRK_COMP_PARAM1 |
            DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
        break;
    default:
        i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
            DW_I2S_QUIRK_COMP_PARAM1;
    }

    i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
        DW_I2S_QUIRK_COMP_PARAM1;
    i2s_pdata[1].cap = DWC_I2S_RECORD;
    i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
    i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle)
    if (r)
        return r;

    for (i = 0; i < ACP_DEVS ; i++) {
        dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
        r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
        if (r) {
            dev_err(dev, "Failed to add dev to genpd\n");
            return r;
    if (adev->asic_type != CHIP_STONEY) {
        for (i = 0; i < ACP_DEVS ; i++) {
            dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
            r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
            if (r) {
                dev_err(dev, "Failed to add dev to genpd\n");
                return r;
            }
        }
    }
@@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* return early if no ACP */
    if (!adev->acp.acp_genpd)
    if (!adev->acp.acp_cell)
        return 0;

    for (i = 0; i < ACP_DEVS ; i++) {
        dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
        ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
        /* If removal fails, dont giveup and try rest */
        if (ret)
            dev_err(dev, "remove dev from genpd failed\n");
    if (adev->acp.acp_genpd) {
        for (i = 0; i < ACP_DEVS ; i++) {
            dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
            ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
            /* If removal fails, dont giveup and try rest */
            if (ret)
                dev_err(dev, "remove dev from genpd failed\n");
        }
        kfree(adev->acp.acp_genpd);
    }

    mfd_remove_devices(adev->acp.parent);
    kfree(adev->acp.acp_res);
    kfree(adev->acp.acp_genpd);
    kfree(adev->acp.acp_cell);

    return 0;
@@ -30,10 +30,10 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amd_acpi.h"
#include "atom.h"

extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
/* Call the ATIF method
 */
/**
@@ -289,7 +289,7 @@ out:
 * handles it.
 * Returns NOTIFY code
 */
int amdgpu_atif_handler(struct amdgpu_device *adev,
static int amdgpu_atif_handler(struct amdgpu_device *adev,
            struct acpi_bus_event *event)
{
    struct amdgpu_atif *atif = &adev->atif;
@@ -27,7 +27,6 @@
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
@@ -61,24 +60,6 @@ int amdgpu_amdkfd_init(void)
    return ret;
}

bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
{
    switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
    case CHIP_KAVERI:
        kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
        break;
#endif
    case CHIP_CARRIZO:
        kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
        break;
    default:
        return false;
    }

    return true;
}

void amdgpu_amdkfd_fini(void)
{
    if (kgd2kfd) {
@@ -89,9 +70,27 @@ void amdgpu_amdkfd_fini(void)

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
    if (kgd2kfd)
        adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
                                   adev->pdev, kfd2kgd);
    const struct kfd2kgd_calls *kfd2kgd;

    if (!kgd2kfd)
        return;

    switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
    case CHIP_KAVERI:
        kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
        break;
#endif
    case CHIP_CARRIZO:
        kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
        break;
    default:
        dev_info(adev->dev, "kfd not supported on this ASIC\n");
        return;
    }

    adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
                               adev->pdev, kfd2kgd);
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        return -ENOMEM;

    r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
                         AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
                         AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
                         &(*mem)->bo);
    if (r) {
        dev_err(adev->dev,
            "failed to allocate BO for amdkfd (%d)\n", r);
@@ -39,8 +39,6 @@ struct kgd_mem {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
|
|||
|
||||
n = AMDGPU_BENCHMARK_ITERATIONS;
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
|
||||
NULL, &sobj);
|
||||
NULL, 0, &sobj);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
|
|||
goto out_cleanup;
|
||||
}
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
|
||||
NULL, &dobj);
|
||||
NULL, 0, &dobj);
|
||||
if (r) {
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
|
|
@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
|
||||
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
|
||||
if (usermm) {
|
||||
|
@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
|
|||
entry->tv.bo = &entry->robj->tbo;
|
||||
entry->tv.shared = !entry->robj->prime_shared_count;
|
||||
|
||||
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
|
||||
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
|
||||
gds_obj = entry->robj;
|
||||
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
|
||||
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
|
||||
gws_obj = entry->robj;
|
||||
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
|
||||
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
|
||||
oa_obj = entry->robj;
|
||||
|
||||
total_size += amdgpu_bo_size(entry->robj);
|
||||
|
@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
|
|||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
union drm_amdgpu_bo_list *args = data;
|
||||
uint32_t handle = args->in.list_handle;
|
||||
const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
|
||||
const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
|
||||
|
||||
struct drm_amdgpu_bo_list_entry *info;
|
||||
struct amdgpu_bo_list *list;
|
||||
|
|
|
@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
|
|||
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
|
||||
true, domain, flags,
|
||||
NULL, &placement, NULL,
|
||||
&obj);
|
||||
0, &obj);
|
||||
if (ret) {
|
||||
DRM_ERROR("(%d) bo create failed\n", ret);
|
||||
return ret;
|
||||
|
@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
|
|||
r = amdgpu_bo_reserve(obj, true);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
|
||||
r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
|
||||
min_offset, max_offset, mcaddr);
|
||||
amdgpu_bo_unreserve(obj);
|
||||
return r;
|
||||
|
@ -659,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
|
|||
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
|
||||
|
||||
if (CGS_UCODE_ID_CP_MEC == type)
|
||||
info->image_size = (header->jt_offset) << 2;
|
||||
info->image_size = le32_to_cpu(header->jt_offset) << 2;
|
||||
|
||||
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
|
||||
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
|
||||
|
|
|
@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
|
|||
|
||||
*offset = data->offset;
|
||||
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
|
||||
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
|
||||
amdgpu_bo_unref(&p->uf_entry.robj);
|
||||
|
@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
|
|||
}
|
||||
|
||||
/* get chunks */
|
||||
chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
|
||||
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
|
||||
if (copy_from_user(chunk_array, chunk_array_user,
|
||||
sizeof(uint64_t)*cs->in.num_chunks)) {
|
||||
ret = -EFAULT;
|
||||
|
@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
|
|||
struct drm_amdgpu_cs_chunk user_chunk;
|
||||
uint32_t __user *cdata;
|
||||
|
||||
chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
|
||||
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
|
||||
if (copy_from_user(&user_chunk, chunk_ptr,
|
||||
sizeof(struct drm_amdgpu_cs_chunk))) {
|
||||
ret = -EFAULT;
|
||||
|
@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
|
|||
p->chunks[i].length_dw = user_chunk.length_dw;
|
||||
|
||||
size = p->chunks[i].length_dw;
|
||||
cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
|
||||
cdata = u64_to_user_ptr(user_chunk.chunk_data);
|
||||
|
||||
p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
|
||||
if (p->chunks[i].kdata == NULL) {
|
||||
|
@ -348,11 +348,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
|
|||
* that.
|
||||
*/
|
||||
if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
|
||||
domain = bo->prefered_domains;
|
||||
domain = bo->preferred_domains;
|
||||
else
|
||||
domain = bo->allowed_domains;
|
||||
} else {
|
||||
domain = bo->prefered_domains;
|
||||
domain = bo->preferred_domains;
|
||||
}
|
||||
} else {
|
||||
domain = bo->allowed_domains;
|
||||
|
@ -1437,7 +1437,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
|
|||
if (fences == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
fences_user = (void __user *)(uintptr_t)(wait->in.fences);
|
||||
fences_user = u64_to_user_ptr(wait->in.fences);
|
||||
if (copy_from_user(fences, fences_user,
|
||||
sizeof(struct drm_amdgpu_fence) * fence_count)) {
|
||||
r = -EFAULT;
|
||||
|
|
|
@ -336,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
|
|||
|
||||
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->vram_scratch.robj == NULL) {
|
||||
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
|
||||
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &adev->vram_scratch.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
r = amdgpu_bo_pin(adev->vram_scratch.robj,
|
||||
AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->vram_scratch.robj);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->vram_scratch.robj,
|
||||
(void **)&adev->vram_scratch.ptr);
|
||||
if (r)
|
||||
amdgpu_bo_unpin(adev->vram_scratch.robj);
|
||||
amdgpu_bo_unreserve(adev->vram_scratch.robj);
|
||||
|
||||
return r;
|
||||
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->vram_scratch.robj,
|
||||
&adev->vram_scratch.gpu_addr,
|
||||
(void **)&adev->vram_scratch.ptr);
|
||||
}
|
||||
|
||||
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->vram_scratch.robj == NULL) {
|
||||
return;
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
|
||||
if (likely(r == 0)) {
|
||||
amdgpu_bo_kunmap(adev->vram_scratch.robj);
|
||||
amdgpu_bo_unpin(adev->vram_scratch.robj);
|
||||
amdgpu_bo_unreserve(adev->vram_scratch.robj);
|
||||
}
|
||||
amdgpu_bo_unref(&adev->vram_scratch.robj);
|
||||
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -539,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
|
|||
int r;
|
||||
|
||||
if (adev->wb.wb_obj == NULL) {
|
||||
r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
|
||||
/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
|
||||
r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->wb.wb_obj, &adev->wb.gpu_addr,
|
||||
(void **)&adev->wb.wb);
|
||||
|
@ -570,47 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
|
|||
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
|
||||
{
|
||||
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
|
||||
|
||||
if (offset < adev->wb.num_wb) {
|
||||
__set_bit(offset, adev->wb.used);
|
||||
*wb = offset;
|
||||
return 0;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_wb_get_64bit - Allocate a wb entry
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @wb: wb index
|
||||
*
|
||||
* Allocate a wb slot for use by the driver (all asics).
|
||||
* Returns 0 on success or -EINVAL on failure.
|
||||
*/
|
||||
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
|
||||
{
|
||||
unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
|
||||
adev->wb.num_wb, 0, 2, 7, 0);
|
||||
if ((offset + 1) < adev->wb.num_wb) {
|
||||
__set_bit(offset, adev->wb.used);
|
||||
__set_bit(offset + 1, adev->wb.used);
|
||||
*wb = offset;
|
||||
return 0;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb)
|
||||
{
|
||||
int i = 0;
|
||||
unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
|
||||
adev->wb.num_wb, 0, 8, 63, 0);
|
||||
if ((offset + 7) < adev->wb.num_wb) {
|
||||
for (i = 0; i < 8; i++)
|
||||
__set_bit(offset + i, adev->wb.used);
|
||||
*wb = offset;
|
||||
*wb = offset * 8; /* convert to dw offset */
|
||||
return 0;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
|
@ -631,39 +560,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
|
|||
__clear_bit(wb, adev->wb.used);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_wb_free_64bit - Free a wb entry
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @wb: wb index
|
||||
*
|
||||
* Free a wb slot allocated for use by the driver (all asics)
|
||||
*/
|
||||
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
|
||||
{
|
||||
if ((wb + 1) < adev->wb.num_wb) {
|
||||
__clear_bit(wb, adev->wb.used);
|
||||
__clear_bit(wb + 1, adev->wb.used);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_wb_free_256bit - Free a wb entry
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @wb: wb index
|
||||
*
|
||||
* Free a wb slot allocated for use by the driver (all asics)
|
||||
*/
|
||||
void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
if ((wb + 7) < adev->wb.num_wb)
|
||||
for (i = 0; i < 8; i++)
|
||||
__clear_bit(wb + i, adev->wb.used);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vram_location - try to find VRAM location
|
||||
* @adev: amdgpu device structure holding all necessary informations
|
||||
|
@ -1948,7 +1844,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
|
|||
AMD_IP_BLOCK_TYPE_DCE,
|
||||
AMD_IP_BLOCK_TYPE_GFX,
|
||||
AMD_IP_BLOCK_TYPE_SDMA,
|
||||
AMD_IP_BLOCK_TYPE_VCE,
|
||||
AMD_IP_BLOCK_TYPE_UVD,
|
||||
AMD_IP_BLOCK_TYPE_VCE
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
|
||||
|
|
|
@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
|||
{
|
||||
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
|
||||
|
||||
drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
|
||||
drm_gem_object_put_unlocked(amdgpu_fb->obj);
|
||||
drm_framebuffer_cleanup(fb);
|
||||
kfree(amdgpu_fb);
|
||||
}
|
||||
|
@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
|
|||
|
||||
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
|
||||
if (amdgpu_fb == NULL) {
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
drm_gem_object_put_unlocked(obj);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
|
||||
if (ret) {
|
||||
kfree(amdgpu_fb);
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
drm_gem_object_put_unlocked(obj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
|
|
@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
|
|||
amdgpu_bo_unpin(abo);
|
||||
amdgpu_bo_unreserve(abo);
|
||||
}
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
}
|
||||
|
||||
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
|
||||
|
@ -250,7 +250,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
|
|||
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
|
||||
info->fix.smem_start = adev->mc.aper_base + tmp;
|
||||
info->fix.smem_len = amdgpu_bo_size(abo);
|
||||
info->screen_base = abo->kptr;
|
||||
info->screen_base = amdgpu_bo_kptr(abo);
|
||||
info->screen_size = amdgpu_bo_size(abo);
|
||||
|
||||
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
|
||||
|
@ -280,7 +280,7 @@ out:
|
|||
|
||||
}
|
||||
if (fb && ret) {
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
drm_framebuffer_unregister_private(fb);
|
||||
drm_framebuffer_cleanup(fb);
|
||||
kfree(fb);
|
||||
|
|
|
@ -144,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
|
|||
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &adev->gart.robj);
|
||||
NULL, NULL, 0, &adev->gart.robj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -59,7 +59,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
|
|||
|
||||
retry:
|
||||
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
|
||||
flags, NULL, NULL, &robj);
|
||||
flags, NULL, NULL, 0, &robj);
|
||||
if (r) {
|
||||
if (r != -ERESTARTSYS) {
|
||||
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
|
||||
|
@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
|
|||
spin_lock(&file->table_lock);
|
||||
idr_for_each_entry(&file->object_idr, gobj, handle) {
|
||||
WARN_ONCE(1, "And also active allocations!\n");
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
}
|
||||
idr_destroy(&file->object_idr);
|
||||
spin_unlock(&file->table_lock);
|
||||
|
@ -263,7 +263,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
r = drm_gem_handle_create(filp, gobj, &handle);
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -306,7 +306,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
|||
return r;
|
||||
|
||||
bo = gem_to_amdgpu_bo(gobj);
|
||||
bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
|
||||
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
|
||||
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
|
||||
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
|
||||
if (r)
|
||||
|
@ -341,7 +341,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
r = drm_gem_handle_create(filp, gobj, &handle);
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -355,7 +355,7 @@ unlock_mmap_sem:
|
|||
up_read(¤t->mm->mmap_sem);
|
||||
|
||||
release_object:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
@ -374,11 +374,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
|
|||
robj = gem_to_amdgpu_bo(gobj);
|
||||
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
|
||||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return -EPERM;
|
||||
}
|
||||
*offset_p = amdgpu_bo_mmap_offset(robj);
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -448,7 +448,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
|
|||
} else
|
||||
r = ret;
|
||||
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -491,7 +491,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
|
|||
unreserve:
|
||||
amdgpu_bo_unreserve(robj);
|
||||
out:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -664,7 +664,7 @@ error_backoff:
|
|||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
|
||||
error_unref:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -689,11 +689,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
|||
switch (args->op) {
|
||||
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
|
||||
struct drm_amdgpu_gem_create_in info;
|
||||
void __user *out = (void __user *)(uintptr_t)args->value;
|
||||
void __user *out = u64_to_user_ptr(args->value);
|
||||
|
||||
info.bo_size = robj->gem_base.size;
|
||||
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
|
||||
info.domains = robj->prefered_domains;
|
||||
info.domains = robj->preferred_domains;
|
||||
info.domain_flags = robj->flags;
|
||||
amdgpu_bo_unreserve(robj);
|
||||
if (copy_to_user(out, &info, sizeof(info)))
|
||||
|
@ -711,10 +711,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
|||
amdgpu_bo_unreserve(robj);
|
||||
break;
|
||||
}
|
||||
robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
|
||||
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
|
||||
AMDGPU_GEM_DOMAIN_GTT |
|
||||
AMDGPU_GEM_DOMAIN_CPU);
|
||||
robj->allowed_domains = robj->prefered_domains;
|
||||
robj->allowed_domains = robj->preferred_domains;
|
||||
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
|
||||
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
|
||||
|
||||
|
@ -726,7 +726,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
out:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -754,7 +754,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
|
|||
|
||||
r = drm_gem_handle_create(file_priv, gobj, &handle);
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
drm_gem_object_put_unlocked(gobj);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
|
|||
"Error during ACPI methods call\n");
|
||||
}
|
||||
|
||||
amdgpu_amdkfd_load_interface(adev);
|
||||
amdgpu_amdkfd_device_probe(adev);
|
||||
amdgpu_amdkfd_device_init(adev);
|
||||
|
||||
|
|
|
@ -220,7 +220,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
|
|||
}
|
||||
|
||||
/**
|
||||
* amdgpu_bo_create_kernel - create BO for kernel use
|
||||
* amdgpu_bo_create_reserved - create reserved BO for kernel use
|
||||
*
|
||||
* @adev: amdgpu device object
|
||||
* @size: size for the new BO
|
||||
|
@ -230,24 +230,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
|
|||
* @gpu_addr: GPU addr of the pinned BO
|
||||
* @cpu_addr: optional CPU address mapping
|
||||
*
|
||||
* Allocates and pins a BO for kernel internal use.
|
||||
* Allocates and pins a BO for kernel internal use, and returns it still
|
||||
* reserved.
|
||||
*
|
||||
* Returns 0 on success, negative error code otherwise.
|
||||
*/
|
||||
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
||||
unsigned long size, int align,
|
||||
u32 domain, struct amdgpu_bo **bo_ptr,
|
||||
u64 *gpu_addr, void **cpu_addr)
|
||||
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
|
||||
unsigned long size, int align,
|
||||
u32 domain, struct amdgpu_bo **bo_ptr,
|
||||
u64 *gpu_addr, void **cpu_addr)
|
||||
{
|
||||
bool free = false;
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_create(adev, size, align, true, domain,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, bo_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
|
||||
return r;
|
||||
if (!*bo_ptr) {
|
||||
r = amdgpu_bo_create(adev, size, align, true, domain,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, 0, bo_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
|
||||
r);
|
||||
return r;
|
||||
}
|
||||
free = true;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(*bo_ptr, false);
|
||||
|
@ -270,19 +276,51 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
|||
}
|
||||
}
|
||||
|
||||
amdgpu_bo_unreserve(*bo_ptr);
|
||||
|
||||
return 0;
|
||||
|
||||
error_unreserve:
|
||||
amdgpu_bo_unreserve(*bo_ptr);
|
||||
|
||||
error_free:
|
||||
amdgpu_bo_unref(bo_ptr);
|
||||
if (free)
|
||||
amdgpu_bo_unref(bo_ptr);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_bo_create_kernel - create BO for kernel use
|
||||
*
|
||||
* @adev: amdgpu device object
|
||||
* @size: size for the new BO
|
||||
* @align: alignment for the new BO
|
||||
* @domain: where to place it
|
||||
* @bo_ptr: resulting BO
|
||||
* @gpu_addr: GPU addr of the pinned BO
|
||||
* @cpu_addr: optional CPU address mapping
|
||||
*
|
||||
* Allocates and pins a BO for kernel internal use.
|
||||
*
|
||||
* Returns 0 on success, negative error code otherwise.
|
||||
*/
|
||||
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
||||
unsigned long size, int align,
|
||||
u32 domain, struct amdgpu_bo **bo_ptr,
|
||||
u64 *gpu_addr, void **cpu_addr)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
|
||||
gpu_addr, cpu_addr);
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
amdgpu_bo_unreserve(*bo_ptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_bo_free_kernel - free BO for kernel use
|
||||
*
|
||||
|
@ -318,6 +356,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
struct sg_table *sg,
|
||||
struct ttm_placement *placement,
|
||||
struct reservation_object *resv,
|
||||
uint64_t init_value,
|
||||
struct amdgpu_bo **bo_ptr)
|
||||
{
|
||||
struct amdgpu_bo *bo;
|
||||
|
@ -352,13 +391,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
}
|
||||
INIT_LIST_HEAD(&bo->shadow_list);
|
||||
INIT_LIST_HEAD(&bo->va);
|
||||
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
|
||||
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
|
||||
AMDGPU_GEM_DOMAIN_GTT |
|
||||
AMDGPU_GEM_DOMAIN_CPU |
|
||||
AMDGPU_GEM_DOMAIN_GDS |
|
||||
AMDGPU_GEM_DOMAIN_GWS |
|
||||
AMDGPU_GEM_DOMAIN_OA);
|
||||
bo->allowed_domains = bo->prefered_domains;
|
||||
bo->allowed_domains = bo->preferred_domains;
|
||||
if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
|
||||
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
|
||||
|
||||
|
@ -418,7 +457,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
|
||||
struct dma_fence *fence;
|
||||
|
||||
r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
|
||||
r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
|
||||
if (unlikely(r))
|
||||
goto fail_unreserve;
|
||||
|
||||
|
@ -470,6 +509,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
|
|||
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
|
||||
NULL, &placement,
|
||||
bo->tbo.resv,
|
||||
0,
|
||||
&bo->shadow);
|
||||
if (!r) {
|
||||
bo->shadow->parent = amdgpu_bo_ref(bo);
|
||||
|
@ -481,11 +521,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
|
|||
return r;
|
||||
}
|
||||
|
||||
/* init_value will only take effect when flags contains
|
||||
* AMDGPU_GEM_CREATE_VRAM_CLEARED.
|
||||
*/
|
||||
int amdgpu_bo_create(struct amdgpu_device *adev,
|
||||
unsigned long size, int byte_align,
|
||||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg,
|
||||
struct reservation_object *resv,
|
||||
uint64_t init_value,
|
||||
struct amdgpu_bo **bo_ptr)
|
||||
{
|
||||
struct ttm_placement placement = {0};
|
||||
|
@ -500,7 +544,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
|
|||
|
||||
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
|
||||
domain, flags, sg, &placement,
|
||||
resv, bo_ptr);
|
||||
resv, init_value, bo_ptr);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -562,7 +606,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
|
|||
if (bo->pin_count)
|
||||
return 0;
|
||||
|
||||
domain = bo->prefered_domains;
|
||||
domain = bo->preferred_domains;
|
||||
|
||||
retry:
|
||||
amdgpu_ttm_placement_from_domain(bo, domain);
|
||||
|
@ -609,16 +653,16 @@ err:
|
|||
|
||||
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
|
||||
{
|
||||
bool is_iomem;
|
||||
void *kptr;
|
||||
long r;
|
||||
|
||||
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
|
||||
return -EPERM;
|
||||
|
||||
if (bo->kptr) {
|
||||
if (ptr) {
|
||||
*ptr = bo->kptr;
|
||||
}
|
||||
kptr = amdgpu_bo_kptr(bo);
|
||||
if (kptr) {
|
||||
if (ptr)
|
||||
*ptr = kptr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -631,19 +675,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
|
||||
if (ptr)
|
||||
*ptr = bo->kptr;
|
||||
*ptr = amdgpu_bo_kptr(bo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
|
||||
{
|
||||
bool is_iomem;
|
||||
|
||||
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
|
||||
}
|
||||
|
||||
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
|
||||
{
|
||||
if (bo->kptr == NULL)
|
||||
return;
|
||||
bo->kptr = NULL;
|
||||
ttm_bo_kunmap(&bo->kmap);
|
||||
if (bo->kmap.bo)
|
||||
ttm_bo_kunmap(&bo->kmap);
|
||||
}
|
||||
|
||||
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
|
||||
|
|
|
@ -33,6 +33,67 @@
|
|||
|
||||
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
|
||||
|
||||
struct amdgpu_bo_va_mapping {
|
||||
struct list_head list;
|
||||
struct rb_node rb;
|
||||
uint64_t start;
|
||||
uint64_t last;
|
||||
uint64_t __subtree_last;
|
||||
uint64_t offset;
|
||||
uint64_t flags;
|
||||
};
|
||||
|
||||
/* bo virtual addresses in a specific vm */
|
||||
struct amdgpu_bo_va {
|
||||
/* protected by bo being reserved */
|
||||
struct list_head bo_list;
|
||||
struct dma_fence *last_pt_update;
|
||||
unsigned ref_count;
|
||||
|
||||
/* protected by vm mutex and spinlock */
|
||||
struct list_head vm_status;
|
||||
|
||||
/* mappings for this bo_va */
|
||||
struct list_head invalids;
|
||||
struct list_head valids;
|
||||
|
||||
/* constant after initialization */
|
||||
struct amdgpu_vm *vm;
|
||||
struct amdgpu_bo *bo;
|
||||
};
|
||||
|
||||
|
||||
struct amdgpu_bo {
|
||||
/* Protected by tbo.reserved */
|
||||
u32 preferred_domains;
|
||||
u32 allowed_domains;
|
||||
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
|
||||
struct ttm_placement placement;
|
||||
struct ttm_buffer_object tbo;
|
||||
struct ttm_bo_kmap_obj kmap;
|
||||
u64 flags;
|
||||
unsigned pin_count;
|
||||
u64 tiling_flags;
|
||||
u64 metadata_flags;
|
||||
void *metadata;
|
||||
u32 metadata_size;
|
||||
unsigned prime_shared_count;
|
||||
/* list of all virtual address to which this bo is associated to */
|
||||
struct list_head va;
|
||||
/* Constant after initialization */
|
||||
struct drm_gem_object gem_base;
|
||||
struct amdgpu_bo *parent;
|
||||
struct amdgpu_bo *shadow;
|
||||
|
||||
struct ttm_bo_kmap_obj dma_buf_vmap;
|
||||
struct amdgpu_mn *mn;
|
||||
|
||||
union {
|
||||
struct list_head mn_list;
|
||||
struct list_head shadow_list;
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
|
||||
* @mem_type: ttm memory type
|
||||
|
@ -132,6 +193,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
|
|||
bool kernel, u32 domain, u64 flags,
|
||||
struct sg_table *sg,
|
||||
struct reservation_object *resv,
|
||||
uint64_t init_value,
|
||||
struct amdgpu_bo **bo_ptr);
|
||||
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
||||
unsigned long size, int byte_align,
|
||||
|
@ -139,7 +201,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
|
|||
struct sg_table *sg,
|
||||
struct ttm_placement *placement,
|
||||
struct reservation_object *resv,
|
||||
uint64_t init_value,
|
||||
struct amdgpu_bo **bo_ptr);
|
||||
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
|
||||
unsigned long size, int align,
|
||||
u32 domain, struct amdgpu_bo **bo_ptr,
|
||||
u64 *gpu_addr, void **cpu_addr);
|
||||
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
||||
unsigned long size, int align,
|
||||
u32 domain, struct amdgpu_bo **bo_ptr,
|
||||
|
@ -147,6 +214,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
|
|||
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
|
||||
void **cpu_addr);
|
||||
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
|
||||
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
|
||||
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
|
||||
void amdgpu_bo_unref(struct amdgpu_bo **bo);
|
||||
|
|
|
@ -30,6 +30,7 @@ struct cg_flag_name
|
|||
const char *name;
|
||||
};
|
||||
|
||||
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
|
||||
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
|
||||
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
|
||||
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
|
||||
|
|
|
@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
|
|||
|
||||
ww_mutex_lock(&resv->lock, NULL);
|
||||
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
|
||||
ww_mutex_unlock(&resv->lock);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
|
|
@ -184,47 +184,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
|||
return r;
|
||||
}
|
||||
|
||||
if (ring->funcs->support_64bit_ptrs) {
|
||||
r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
} else {
|
||||
r = amdgpu_wb_get(adev, &ring->rptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_wb_get(adev, &ring->wptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_wb_get(adev, &ring->rptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
|
||||
r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_wb_get(adev, &ring->wptr_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
} else {
|
||||
r = amdgpu_wb_get(adev, &ring->fence_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_wb_get(adev, &ring->fence_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
|
||||
|
@ -286,19 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
|
|||
{
|
||||
ring->ready = false;
|
||||
|
||||
if (ring->funcs->support_64bit_ptrs) {
|
||||
amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
|
||||
amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
|
||||
} else {
|
||||
amdgpu_wb_free(ring->adev, ring->rptr_offs);
|
||||
amdgpu_wb_free(ring->adev, ring->wptr_offs);
|
||||
}
|
||||
/* Not to finish a ring which is not initialized */
|
||||
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
|
||||
return;
|
||||
|
||||
amdgpu_wb_free(ring->adev, ring->rptr_offs);
|
||||
amdgpu_wb_free(ring->adev, ring->wptr_offs);
|
||||
|
||||
amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
|
||||
if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX)
|
||||
amdgpu_wb_free_256bit(ring->adev, ring->fence_offs);
|
||||
else
|
||||
amdgpu_wb_free(ring->adev, ring->fence_offs);
|
||||
amdgpu_wb_free(ring->adev, ring->fence_offs);
|
||||
|
||||
amdgpu_bo_free_kernel(&ring->ring_obj,
|
||||
&ring->gpu_addr,
|
||||
|
|
|
@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
|
|||
INIT_LIST_HEAD(&sa_manager->flist[i]);
|
||||
|
||||
r = amdgpu_bo_create(adev, size, align, true, domain,
|
||||
0, NULL, NULL, &sa_manager->bo);
|
||||
0, NULL, NULL, 0, &sa_manager->bo);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
|
||||
return r;
|
||||
|
|
|
@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM, 0,
|
||||
NULL, NULL, &vram_obj);
|
||||
NULL, NULL, 0, &vram_obj);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create VRAM object\n");
|
||||
goto out_cleanup;
|
||||
|
@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
|
||||
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
|
||||
NULL, gtt_obj + i);
|
||||
NULL, 0, gtt_obj + i);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to create GTT object %d\n", i);
|
||||
goto out_lclean;
|
||||
|
|
|
@ -105,12 +105,12 @@ TRACE_EVENT(amdgpu_bo_create,
|
|||
__entry->bo = bo;
|
||||
__entry->pages = bo->tbo.num_pages;
|
||||
__entry->type = bo->tbo.mem.mem_type;
|
||||
__entry->prefer = bo->prefered_domains;
|
||||
__entry->prefer = bo->preferred_domains;
|
||||
__entry->allow = bo->allowed_domains;
|
||||
__entry->visible = bo->flags;
|
||||
),
|
||||
|
||||
TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
|
||||
TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
|
||||
__entry->bo, __entry->pages, __entry->type,
|
||||
__entry->prefer, __entry->allow, __entry->visible)
|
||||
);
|
||||
|
|
|
@ -753,7 +753,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
|
|||
struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void*)ttm;
|
||||
int r;
|
||||
int r = 0;
|
||||
|
||||
if (gtt->userptr) {
|
||||
r = amdgpu_ttm_tt_pin_userptr(ttm);
|
||||
|
@ -1232,23 +1232,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
|||
/* Change the size here instead of the init above so only lpfn is affected */
|
||||
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
|
||||
|
||||
r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &adev->stollen_vga_memory);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
|
||||
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->stolen_vga_memory,
|
||||
NULL, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
|
||||
amdgpu_bo_unreserve(adev->stollen_vga_memory);
|
||||
if (r) {
|
||||
amdgpu_bo_unref(&adev->stollen_vga_memory);
|
||||
return r;
|
||||
}
|
||||
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
|
||||
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
|
||||
|
||||
|
@ -1319,13 +1308,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
|
|||
if (!adev->mman.initialized)
|
||||
return;
|
||||
amdgpu_ttm_debugfs_fini(adev);
|
||||
if (adev->stollen_vga_memory) {
|
||||
r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
|
||||
if (adev->stolen_vga_memory) {
|
||||
r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
|
||||
if (r == 0) {
|
||||
amdgpu_bo_unpin(adev->stollen_vga_memory);
|
||||
amdgpu_bo_unreserve(adev->stollen_vga_memory);
|
||||
amdgpu_bo_unpin(adev->stolen_vga_memory);
|
||||
amdgpu_bo_unreserve(adev->stolen_vga_memory);
|
||||
}
|
||||
amdgpu_bo_unref(&adev->stollen_vga_memory);
|
||||
amdgpu_bo_unref(&adev->stolen_vga_memory);
|
||||
}
|
||||
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
|
||||
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
|
||||
|
@ -1509,11 +1498,12 @@ error_free:
|
|||
}
|
||||
|
||||
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
||||
uint32_t src_data,
|
||||
uint64_t src_data,
|
||||
struct reservation_object *resv,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
/* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/
|
||||
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
|
||||
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
|
||||
|
||||
|
@ -1545,7 +1535,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
num_pages -= mm_node->size;
|
||||
++mm_node;
|
||||
}
|
||||
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
|
||||
|
||||
/* 10 double words for each SDMA_OP_PTEPDE cmd */
|
||||
num_dw = num_loops * 10;
|
||||
|
||||
/* for IB padding */
|
||||
num_dw += 64;
|
||||
|
@ -1570,12 +1562,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
|||
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
|
||||
uint64_t dst_addr;
|
||||
|
||||
WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
|
||||
|
||||
dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
|
||||
while (byte_count) {
|
||||
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
|
||||
|
||||
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
|
||||
dst_addr, cur_size_in_bytes);
|
||||
amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
|
||||
dst_addr, 0,
|
||||
cur_size_in_bytes >> 3, 0,
|
||||
src_data);
|
||||
|
||||
dst_addr += cur_size_in_bytes;
|
||||
byte_count -= cur_size_in_bytes;
|
||||
|
|
|
@ -73,7 +73,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
|||
struct dma_fence **fence, bool direct_submit,
|
||||
bool vm_needs_flush);
|
||||
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
||||
uint32_t src_data,
|
||||
uint64_t src_data,
|
||||
struct reservation_object *resv,
|
||||
struct dma_fence **fence);
|
||||
|
||||
|
|
|
@ -358,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
|
|||
(le32_to_cpu(header->jt_offset) * 4);
|
||||
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
|
||||
|
||||
ucode->ucode_size += le32_to_cpu(header->jt_size) * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -381,7 +379,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
|
|||
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
|
||||
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, bo);
|
||||
NULL, NULL, 0, bo);
|
||||
if (err) {
|
||||
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
|
||||
goto failed;
|
||||
|
|
|
@ -1051,7 +1051,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &bo);
|
||||
NULL, NULL, 0, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -1101,7 +1101,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &bo);
|
||||
NULL, NULL, 0, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
|
|||
unsigned i;
|
||||
int r, timeout = adev->usec_timeout;
|
||||
|
||||
/* workaround VCE ring test slow issue for sriov*/
|
||||
/* skip ring test for sriov*/
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
timeout *= 10;
|
||||
return 0;
|
||||
|
||||
r = amdgpu_ring_alloc(ring, 16);
|
||||
if (r) {
|
||||
|
|
|
@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
|
|||
|
||||
if (fences == 0) {
|
||||
if (adev->pm.dpm_enabled) {
|
||||
/* might be used when with pg/cg
|
||||
amdgpu_dpm_enable_uvd(adev, false);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
|
||||
*/
|
||||
}
|
||||
} else {
|
||||
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
|
||||
|
@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
|
|||
struct amdgpu_device *adev = ring->adev;
|
||||
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
|
||||
|
||||
if (set_clocks) {
|
||||
if (adev->pm.dpm_enabled) {
|
||||
amdgpu_dpm_enable_uvd(adev, true);
|
||||
} else {
|
||||
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
|
||||
}
|
||||
if (set_clocks && adev->pm.dpm_enabled) {
|
||||
/* might be used when with pg/cg
|
||||
amdgpu_dpm_enable_uvd(adev, true);
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &bo);
|
||||
NULL, NULL, 0, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
|
|||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL, &bo);
|
||||
NULL, NULL, 0, &bo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
|
|
@@ -288,6 +288,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
	unsigned pt_idx, from, to;
	int r;
	u64 flags;
	uint64_t init_value = 0;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

@@ -321,6 +322,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	if (vm->pte_support_ats) {
		init_value = AMDGPU_PTE_SYSTEM;
		if (level != adev->vm_manager.num_level - 1)
			init_value |= AMDGPU_PDE_PTE;
	}

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.bo->tbo.resv;

@@ -333,7 +340,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     flags,
				     NULL, resv, &pt);
				     NULL, resv, init_value, &pt);
			if (r)
				return r;
@@ -1060,7 +1067,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
	shadow = parent->bo->shadow;

	if (vm->use_cpu_for_update) {
		pd_addr = (unsigned long)parent->bo->kptr;
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

@@ -1401,7 +1408,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,

		pt = entry->bo;
		if (use_cpu_update) {
			pe_start = (unsigned long)pt->kptr;
			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
		} else {
			if (pt->shadow) {
				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
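Note: both hunks above stop dereferencing the BO's kptr field directly and call amdgpu_bo_kptr() instead. A minimal sketch of the assumed accessor pattern, on the assumption that amdgpu_bo_kptr() returns the existing kernel-virtual mapping of the BO (or NULL when it has none); the error handling is illustrative only:

	uint64_t pd_addr;

	/* assumed: fetch the CPU mapping through the accessor rather than
	 * reaching into bo->kptr directly
	 */
	pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
	if (!pd_addr)
		return -EINVAL;	/* no kernel mapping available (assumption) */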
@@ -1995,15 +2002,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	int r;
	uint64_t init_pte_value = 0;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats)
			init_pte_value = AMDGPU_PTE_SYSTEM;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
						mapping->start, mapping->last,
						0, 0, &f);
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
@@ -2494,6 +2505,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	struct amd_sched_rq *rq;
	int r, i;
	u64 flags;
	uint64_t init_pde_value = 0;

	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);

@@ -2515,10 +2527,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	if (r)
		return r;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	else

		if (adev->asic_type == CHIP_RAVEN) {
			vm->pte_support_ats = true;
			init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
		}
	} else
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",

@@ -2538,7 +2557,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     flags,
			     NULL, NULL, &vm->root.bo);
			     NULL, NULL, init_pde_value, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;
@@ -146,6 +146,9 @@ struct amdgpu_vm {

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool use_cpu_for_update;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool pte_support_ats;
};

struct amdgpu_vm_id {
@@ -2431,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2439,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2473,7 +2473,7 @@ unpin:
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;

@@ -2506,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2514,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2548,7 +2548,7 @@ unpin:
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
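Note: the cursor_set2 hunks above, and the matching dce_v6_0 and dce_v8_0 hunks below, are a mechanical rename from the deprecated drm_gem_object_unreference_unlocked() to drm_gem_object_put_unlocked(). A one-line sketch of the pattern, assuming both helpers simply drop one GEM object reference without requiring struct_mutex:

	/* before */
	drm_gem_object_unreference_unlocked(obj);

	/* after: same semantics, newer name */
	drm_gem_object_put_unlocked(obj);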
@@ -42,6 +42,7 @@
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);

@@ -2321,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2329,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2363,7 +2364,7 @@ unpin:
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
@@ -2335,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2343,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

@@ -2377,7 +2377,7 @@ unpin:
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
@@ -479,6 +479,8 @@ static int dce_virtual_hw_init(void *handle)
#endif
		/* no DCE */
		break;
	case CHIP_VEGA10:
		break;
	default:
		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
	}
@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||
|
||||
static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gfx.rlc.save_restore_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
|
||||
adev->gfx.rlc.save_restore_obj = NULL;
|
||||
}
|
||||
|
||||
if (adev->gfx.rlc.clear_state_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
|
||||
adev->gfx.rlc.clear_state_obj = NULL;
|
||||
}
|
||||
|
||||
if (adev->gfx.rlc.cp_table_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
|
||||
adev->gfx.rlc.cp_table_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
|
||||
|
@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
|
|||
|
||||
if (src_ptr) {
|
||||
/* save restore block */
|
||||
if (adev->gfx.rlc.save_restore_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.save_restore_obj);
|
||||
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.save_restore_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.save_restore_obj,
|
||||
&adev->gfx.rlc.save_restore_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.sr_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
|
||||
r);
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* write the sr buffer */
|
||||
dst_ptr = adev->gfx.rlc.sr_ptr;
|
||||
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
|
||||
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
|
||||
|
||||
amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
|
||||
}
|
||||
|
@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
|
|||
adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
|
||||
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
|
||||
|
||||
if (adev->gfx.rlc.clear_state_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.clear_state_obj);
|
||||
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_obj,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
|
||||
gfx_v6_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* set up the cs buffer */
|
||||
dst_ptr = adev->gfx.rlc.cs_ptr;
|
||||
reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
|
||||
|
|
|
@@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}

/**
 * gmc_v7_0_init_compute_vmid - gart enable
 * gfx_v7_0_init_compute_vmid - gart enable
 *
 * @adev: amdgpu_device pointer
 *

@@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
#define DEFAULT_SH_MEM_BASES (0x6000)
#define FIRST_COMPUTE_VMID (8)
#define LAST_COMPUTE_VMID (16)
static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;

@@ -1939,7 +1939,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gmc_v7_0_init_compute_vmid(adev);
	gfx_v7_0_init_compute_vmid(adev);

	WREG32(mmSX_DEBUG_1, 0x20);
@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
|
|||
*/
|
||||
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
|
||||
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
|
||||
|
||||
if (ring->mqd_obj) {
|
||||
r = amdgpu_bo_reserve(ring->mqd_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
|
||||
|
||||
amdgpu_bo_unpin(ring->mqd_obj);
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
|
||||
amdgpu_bo_unref(&ring->mqd_obj);
|
||||
ring->mqd_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gfx.mec.hpd_eop_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
|
||||
adev->gfx.mec.hpd_eop_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
|
||||
|
@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
|
|||
/* allocate space for ALL pipes (even the ones we don't own) */
|
||||
mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
|
||||
* GFX7_MEC_HPD_SIZE * 2;
|
||||
if (adev->gfx.mec.hpd_eop_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev,
|
||||
mec_hpd_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->gfx.mec.hpd_eop_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v7_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_obj,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr,
|
||||
(void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
|
||||
gfx_v7_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
|
||||
gfx_v7_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
|
|||
struct cik_mqd *mqd;
|
||||
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
|
||||
|
||||
if (ring->mqd_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev,
|
||||
sizeof(struct cik_mqd),
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&ring->mqd_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(ring->mqd_obj, false);
|
||||
if (unlikely(r != 0))
|
||||
goto out;
|
||||
|
||||
r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&mqd_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
|
||||
&mqd_gpu_addr, (void **)&mqd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
|
||||
goto out_unreserve;
|
||||
}
|
||||
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
|
||||
goto out_unreserve;
|
||||
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
mutex_lock(&adev->srbm_mutex);
|
||||
|
@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
|
|||
mutex_unlock(&adev->srbm_mutex);
|
||||
|
||||
amdgpu_bo_kunmap(ring->mqd_obj);
|
||||
out_unreserve:
|
||||
amdgpu_bo_unreserve(ring->mqd_obj);
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
|||
*/
|
||||
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* save restore block */
|
||||
if (adev->gfx.rlc.save_restore_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
|
||||
adev->gfx.rlc.save_restore_obj = NULL;
|
||||
}
|
||||
|
||||
/* clear state block */
|
||||
if (adev->gfx.rlc.clear_state_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
|
||||
adev->gfx.rlc.clear_state_obj = NULL;
|
||||
}
|
||||
|
||||
/* clear state block */
|
||||
if (adev->gfx.rlc.cp_table_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
|
||||
adev->gfx.rlc.cp_table_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
||||
|
@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
|
||||
if (src_ptr) {
|
||||
/* save restore block */
|
||||
if (adev->gfx.rlc.save_restore_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.save_restore_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.save_restore_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.save_restore_obj,
|
||||
&adev->gfx.rlc.save_restore_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.sr_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* write the sr buffer */
|
||||
dst_ptr = adev->gfx.rlc.sr_ptr;
|
||||
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
|
||||
|
@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
/* clear state block */
|
||||
adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
|
||||
|
||||
if (adev->gfx.rlc.clear_state_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.clear_state_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_obj,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* set up the cs buffer */
|
||||
dst_ptr = adev->gfx.rlc.cs_ptr;
|
||||
gfx_v7_0_get_csb_buffer(adev, dst_ptr);
|
||||
|
@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
if (adev->gfx.rlc.cp_table_size) {
|
||||
if (adev->gfx.rlc.cp_table_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.cp_table_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_obj,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
|
||||
gfx_v7_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
|
|
@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
|
|||
|
||||
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* clear state block */
|
||||
if (adev->gfx.rlc.clear_state_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
|
||||
adev->gfx.rlc.clear_state_obj = NULL;
|
||||
}
|
||||
|
||||
/* jump table block */
|
||||
if (adev->gfx.rlc.cp_table_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
|
||||
amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
|
||||
adev->gfx.rlc.cp_table_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
|
||||
|
@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
|
|||
/* clear state block */
|
||||
adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
|
||||
|
||||
if (adev->gfx.rlc.clear_state_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.clear_state_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v8_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v8_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_obj,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
|
||||
gfx_v8_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
|
||||
gfx_v8_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* set up the cs buffer */
|
||||
dst_ptr = adev->gfx.rlc.cs_ptr;
|
||||
gfx_v8_0_get_csb_buffer(adev, dst_ptr);
|
||||
|
@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
|
|||
if ((adev->asic_type == CHIP_CARRIZO) ||
|
||||
(adev->asic_type == CHIP_STONEY)) {
|
||||
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
|
||||
if (adev->gfx.rlc.cp_table_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
|
||||
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
|
||||
NULL, NULL,
|
||||
&adev->gfx.rlc.cp_table_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_obj,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
|
||||
dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
|
|||
|
||||
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gfx.mec.hpd_eop_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
|
||||
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
|
||||
adev->gfx.mec.hpd_eop_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
|
||||
|
@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
|
|||
|
||||
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
|
||||
|
||||
if (adev->gfx.mec.hpd_eop_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev,
|
||||
mec_hpd_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->gfx.mec.hpd_eop_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v8_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_obj,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr,
|
||||
(void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
|
||||
gfx_v8_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
|
||||
gfx_v8_0_mec_fini(adev);
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
|
|
@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] =
|
|||
SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
|
||||
SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
|
||||
SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
|
||||
SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
|
||||
SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
|
||||
SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
|
||||
SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
|
||||
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
|
||||
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
|
||||
|
@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
|
|||
if (cs_data) {
|
||||
/* clear state block */
|
||||
adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
|
||||
if (adev->gfx.rlc.clear_state_obj == NULL) {
|
||||
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_obj,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev,
|
||||
"(%d) failed to create rlc csb bo\n", r);
|
||||
gfx_v9_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.clear_state_obj,
|
||||
&adev->gfx.rlc.clear_state_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cs_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
|
||||
r);
|
||||
gfx_v9_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
/* set up the cs buffer */
|
||||
dst_ptr = adev->gfx.rlc.cs_ptr;
|
||||
|
@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
|
|||
if (adev->asic_type == CHIP_RAVEN) {
|
||||
/* TODO: double check the cp_table_size for RV */
|
||||
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
|
||||
if (adev->gfx.rlc.cp_table_obj == NULL) {
|
||||
r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_obj,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev,
|
||||
"(%d) failed to create cp table bo\n", r);
|
||||
gfx_v9_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->gfx.rlc.cp_table_obj,
|
||||
&adev->gfx.rlc.cp_table_gpu_addr,
|
||||
(void **)&adev->gfx.rlc.cp_table_ptr);
|
||||
if (r) {
|
||||
dev_err(adev->dev,
|
||||
"(%d) failed to create cp table bo\n", r);
|
||||
gfx_v9_0_rlc_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
rv_init_cp_jump_table(adev);
|
||||
|
@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
|
|||
|
||||
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (adev->gfx.mec.hpd_eop_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
|
||||
adev->gfx.mec.hpd_eop_obj = NULL;
|
||||
}
|
||||
if (adev->gfx.mec.mec_fw_obj) {
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
|
||||
if (unlikely(r != 0))
|
||||
dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
|
||||
amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
|
||||
|
||||
amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
|
||||
adev->gfx.mec.mec_fw_obj = NULL;
|
||||
}
|
||||
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
|
||||
}
|
||||
|
||||
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
|
||||
|
@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
|
|||
amdgpu_gfx_compute_queue_acquire(adev);
|
||||
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
|
||||
|
||||
if (adev->gfx.mec.hpd_eop_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev,
|
||||
mec_hpd_size,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->gfx.mec.hpd_eop_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr);
|
||||
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.hpd_eop_obj,
|
||||
&adev->gfx.mec.hpd_eop_gpu_addr,
|
||||
(void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
|
||||
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
|
|||
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
|
||||
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
|
||||
|
||||
if (adev->gfx.mec.mec_fw_obj == NULL) {
|
||||
r = amdgpu_bo_create(adev,
|
||||
mec_hdr->header.ucode_size_bytes,
|
||||
PAGE_SIZE, true,
|
||||
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
|
||||
&adev->gfx.mec.mec_fw_obj);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.mec_fw_obj,
|
||||
&adev->gfx.mec.mec_fw_gpu_addr,
|
||||
(void **)&fw);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
|
||||
if (unlikely(r != 0)) {
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
|
||||
&adev->gfx.mec.mec_fw_gpu_addr);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
|
||||
gfx_v9_0_mec_fini(adev);
|
||||
return r;
|
||||
}
|
||||
memcpy(fw, fw_data, fw_size);
|
||||
|
||||
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
|
||||
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -4158,7 +4096,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
	return 0;
}

const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,

@@ -24,7 +24,6 @@
#ifndef __GFX_V9_0_H__
#define __GFX_V9_0_H__

extern const struct amd_ip_funcs gfx_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;

void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
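Note: with gfx_v9_0_ip_funcs now static and its extern declaration removed from gfx_v9_0.h, the rest of the driver is expected to reach the block only through the exported gfx_v9_0_ip_block. A sketch of the usual amdgpu IP-block wrapper; the field values below are an assumption, since the definition itself is not shown in these hunks:

const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,	/* assumed block type */
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,	/* the now-static ops table */
};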
@@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block;

#endif

@@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
				    bool enable);

extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;

#endif
@@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		DRM_DEBUG("Using doorbell -- "
				"wptr_offs == 0x%08x "
				"lower_32_bits(ring->wptr) << 2 == 0x%08x "

@@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
				lower_32_bits(ring->wptr << 2),
				upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
		WRITE_ONCE(*wb, (ring->wptr << 2));
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
@ -573,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
|
|||
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
u32 rb_cntl, ib_cntl;
|
||||
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
|
||||
u32 rb_bufsz;
|
||||
u32 wb_offset;
|
||||
u32 doorbell;
|
||||
u32 doorbell_offset;
|
||||
u32 temp;
|
||||
u64 wptr_gpu_addr;
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
|
@ -660,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
|
|||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
|
||||
}
|
||||
|
||||
/* setup the wptr shadow polling */
|
||||
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
|
||||
lower_32_bits(wptr_gpu_addr));
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
|
||||
upper_32_bits(wptr_gpu_addr));
|
||||
wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
|
||||
else
|
||||
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
|
||||
|
||||
/* enable DMA RB */
|
||||
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
|
||||
|
@ -687,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
|
|||
|
||||
if (adev->mman.buffer_funcs_ring == ring)
|
||||
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -783,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
|
|||
const struct sdma_firmware_header_v1_0 *hdr;
|
||||
const __le32 *fw_data;
|
||||
u32 fw_size;
|
||||
u32 digest_size = 0;
|
||||
int i, j;
|
||||
|
||||
/* halt the MEs */
|
||||
sdma_v4_0_enable(adev, false);
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
uint16_t version_major;
|
||||
uint16_t version_minor;
|
||||
if (!adev->sdma.instance[i].fw)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -799,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
|
|||
amdgpu_ucode_print_sdma_hdr(&hdr->header);
|
||||
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
|
||||
|
||||
version_major = le16_to_cpu(hdr->header.header_version_major);
|
||||
version_minor = le16_to_cpu(hdr->header.header_version_minor);
|
||||
|
||||
if (version_major == 1 && version_minor >= 1) {
|
||||
const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
|
||||
digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
|
||||
}
|
||||
|
||||
fw_size -= digest_size;
|
||||
|
||||
fw_data = (const __le32 *)
|
||||
(adev->sdma.instance[i].fw->data +
|
||||
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
|
||||
|
||||
|
||||
for (j = 0; j < fw_size; j++)
|
||||
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
|
||||
|
||||
|
|
|
@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
|
|||
unsigned i;
|
||||
int r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return 0;
|
||||
|
||||
r = amdgpu_ring_alloc(ring, 16);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
|
||||
|
@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle)
|
|||
return r;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
|
||||
ring = &adev->uvd.ring_enc[i];
|
||||
sprintf(ring->name, "uvd_enc%d", i);
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
ring->use_doorbell = true;
|
||||
ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
|
||||
|
||||
/* currently only use the first enconding ring for
|
||||
* sriov, so set unused location for other unused rings.
|
||||
*/
|
||||
if (i == 0)
|
||||
ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
|
||||
else
|
||||
ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
|
||||
}
|
||||
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
|
||||
if (r)
|
||||
|
@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
|
|||
/* 4, set resp to zero */
|
||||
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
|
||||
|
||||
WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
|
||||
adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
|
||||
adev->uvd.ring_enc[0].wptr = 0;
|
||||
adev->uvd.ring_enc[0].wptr_old = 0;
|
||||
|
||||
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
|
||||
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
|
||||
|
||||
|
@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
|
|||
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
|
||||
return -EBUSY;
|
||||
}
|
||||
WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
|
|||
init_table += header->uvd_table_offset;
|
||||
|
||||
ring = &adev->uvd.ring;
|
||||
ring->wptr = 0;
|
||||
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
|
||||
|
||||
/* disable clock gating */
|
||||
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
|
||||
~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
|
||||
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
|
||||
0xFFFFFFFF, 0x00000004);
|
||||
/* mc resume*/
|
||||
|
@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
|
|||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
|
||||
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
|
||||
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
|
||||
adev->gfx.config.gb_addr_config);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
|
||||
adev->gfx.config.gb_addr_config);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
|
||||
adev->gfx.config.gb_addr_config);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
|
||||
/* mc resume end*/
|
||||
|
||||
|
@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
|
|||
UVD_LMI_CTRL__REQ_MODE_MASK |
|
||||
0x00100000L));
|
||||
|
||||
/* disable byte swapping */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
|
||||
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
|
||||
|
||||
/* take all subblocks out of reset, except VCPU */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
|
||||
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
|
||||
|
@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
|
|||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
|
||||
UVD_VCPU_CNTL__CLK_EN_MASK);
|
||||
|
||||
/* enable UMC */
|
||||
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
|
||||
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
|
||||
|
||||
/* boot up the VCPU */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
|
||||
|
||||
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
|
||||
|
||||
/* enable master interrupt */
|
||||
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
|
||||
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
|
||||
|
@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
|
|||
/* force RBC into idle state */
|
||||
size = order_base_2(ring->ring_size);
|
||||
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
|
||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
|
||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
|
||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
|
||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
|
||||
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
|
||||
|
||||
/* set the write pointer delay */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
|
||||
|
||||
/* set the wb address */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
|
||||
(upper_32_bits(ring->gpu_addr) >> 2));
|
||||
|
||||
/* programm the RB_BASE for ring buffer */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
|
||||
lower_32_bits(ring->gpu_addr));
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
|
||||
upper_32_bits(ring->gpu_addr));
|
||||
|
||||
ring->wptr = 0;
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
ring->wptr = 0;
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
|
||||
|
||||
/* boot up the VCPU */
|
||||
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
|
||||
|
||||
/* enable UMC */
|
||||
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
|
||||
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
|
||||
|
||||
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
|
||||
|
||||
/* add end packet */
|
||||
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
|
||||
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
|
||||
header->uvd_table_size = table_size;
|
||||
|
||||
return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
|
||||
}
|
||||
return -EINVAL; /* already initializaed ? */
|
||||
return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);

WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
adev->vce.ring[0].wptr = 0;
adev->vce.ring[0].wptr_old = 0;

/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);

@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);

return 0;
}

@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),

@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;

return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
return -EINVAL; /* already initializaed ? */
return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**

@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;

/* currently only use the first encoding ring for sriov,
* so set unused location for other unused rings.
*/
if (i == 0)
ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
else if (i == 1)
ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
else
ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)

@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
{
uint32_t val = 0;

if (state == AMDGPU_IRQ_STATE_ENABLE)
val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
if (!amdgpu_sriov_vf(adev)) {
if (state == AMDGPU_IRQ_STATE_ENABLE)
val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
return 0;
}
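The vce_v4_0_mmsch_start path above boils down to a mailbox handshake: clear mmVCE_MMSCH_VF_MAILBOX_RESP, reset the ring doorbell and write pointers, write 0x10000001 to mmVCE_MMSCH_VF_MAILBOX_HOST, then poll until the firmware writes a non-zero response. A minimal stand-alone model of that wait loop, with hypothetical reg_read()/reg_write() helpers standing in for RREG32/WREG32 and a toy register file in place of real MMIO:

#include <stdint.h>
#include <stdio.h>

/* toy register file standing in for MMIO; indices are arbitrary here */
enum { REG_MAILBOX_HOST, REG_MAILBOX_RESP, REG_COUNT };
static uint32_t regs[REG_COUNT];

static void reg_write(int reg, uint32_t val) { regs[reg] = val; }
static uint32_t reg_read(int reg) { return regs[reg]; }

/* model of the firmware side: answer after a few polls */
static void firmware_tick(int *countdown)
{
	if (regs[REG_MAILBOX_HOST] == 0x10000001 && --*countdown == 0)
		regs[REG_MAILBOX_RESP] = 1;	/* "init done" */
}

/* host side: kick the init and poll for a non-zero response */
static int mmsch_start_model(int timeout)
{
	int fw_delay = 3;

	reg_write(REG_MAILBOX_RESP, 0);			/* 4, set resp to zero */
	reg_write(REG_MAILBOX_HOST, 0x10000001);	/* 5, kick off the init */

	while (timeout--) {
		firmware_tick(&fw_delay);
		if (reg_read(REG_MAILBOX_RESP) != 0)
			return 0;
	}
	fprintf(stderr, "failed to init MMSCH, resp = %x\n",
		reg_read(REG_MAILBOX_RESP));
	return -16;	/* -EBUSY */
}

int main(void)
{
	printf("mmsch_start_model -> %d\n", mmsch_start_model(10));
	return 0;
}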
@@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

if (cz_hwmgr->sclk_dpm.soft_min_clk !=
cz_hwmgr->sclk_dpm.soft_max_clk)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));

smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax));

return 0;
}
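The pattern in cz_phm_force_dpm_highest is to pin the SCLK DPM range to a single point: both the soft minimum and the soft maximum are sent to the SMU with the same (maximum) clock, so the firmware has no room to pick anything lower. A small stand-alone sketch of that "clamp the range to one value" idea, with a print-only send_msg() standing in for smum_send_msg_to_smc_with_parameter() and the clock-to-level conversion glossed over:

#include <stdint.h>
#include <stdio.h>

struct sclk_dpm {
	uint32_t soft_min_clk;
	uint32_t soft_max_clk;
};

/* stand-in for smum_send_msg_to_smc_with_parameter() */
static void send_msg(const char *msg, uint32_t clk)
{
	printf("SMC <- %s(clk %u)\n", msg, clk);
}

/* pin the DPM range to a single clock: min and max get the same value */
static void force_single_level(struct sclk_dpm *dpm, uint32_t clk)
{
	dpm->soft_min_clk = clk;
	dpm->soft_max_clk = clk;
	send_msg("SetSclkSoftMin", clk);
	send_msg("SetSclkSoftMax", clk);
}

int main(void)
{
	struct sclk_dpm dpm = { .soft_min_clk = 300, .soft_max_clk = 1100 };

	force_single_level(&dpm, dpm.soft_max_clk);	/* "highest" */
	force_single_level(&dpm, 300);			/* "lowest"  */
	return 0;
}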
@@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

if (cz_hwmgr->sclk_dpm.soft_min_clk !=
cz_hwmgr->sclk_dpm.soft_max_clk) {
cz_hwmgr->sclk_dpm.soft_max_clk =
cz_hwmgr->sclk_dpm.soft_min_clk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));

smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));

return 0;
}

static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
{
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
sclk,
PPSMC_MSG_SetSclkSoftMin));

smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
sclk,
PPSMC_MSG_SetSclkSoftMax));
return 0;
}

static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
{
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
int32_t tmp_sclk;
int32_t count;

tmp_sclk = table->entries[table->count-1].clk * 70 / 100;

for (count = table->count-1; count >= 0; count--) {
if (tmp_sclk >= table->entries[count].clk) {
tmp_sclk = table->entries[count].clk;
*sclk = tmp_sclk;
break;
}
}
if (count < 0)
*sclk = table->entries[0].clk;

return 0;
}
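cz_get_profiling_clk picks the "standard profile" SCLK as roughly 70% of the highest entry in the vddc-on-sclk dependency table, then snaps it down to the nearest real table entry, falling back to the lowest entry if nothing qualifies. The same selection, modelled over a plain array with hypothetical clock values:

#include <stdint.h>
#include <stdio.h>

/* pick ~70% of the top clock, snapped down to an existing table entry */
static uint32_t profiling_clk(const uint32_t *clks, int count)
{
	uint32_t target = clks[count - 1] * 70 / 100;
	int i;

	for (i = count - 1; i >= 0; i--)
		if (target >= clks[i])
			return clks[i];

	return clks[0];	/* target below the lowest entry */
}

int main(void)
{
	/* hypothetical SCLK ladder in MHz, lowest to highest */
	uint32_t clks[] = { 300, 600, 800, 1000, 1100 };

	/* 70% of 1100 is 770, which snaps down to the 600 MHz entry */
	printf("standard-profile sclk: %u MHz\n",
	       profiling_clk(clks, sizeof(clks) / sizeof(clks[0])));
	return 0;
}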
@@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
uint32_t sclk = 0;
int ret = 0;
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

if (level == hwmgr->dpm_level)
return ret;

if (!(hwmgr->dpm_level & profile_mode_mask)) {
/* enter profile mode, save current level, disable gfx cg*/
if (level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
}
} else {
/* exit profile mode, restore level, enable gfx cg*/
if (!(level & profile_mode_mask)) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
level = hwmgr->saved_dpm_level;
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_GATE);
}
}

switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
ret = cz_phm_force_dpm_lowest(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = cz_get_profiling_clk(hwmgr, &sclk);
if (ret)
return ret;
hwmgr->dpm_level = level;
cz_phm_force_dpm_sclk(hwmgr, sclk);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}

hwmgr->dpm_level = level;

return ret;
}
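cz_dpm_force_dpm_level treats the three PROFILE_* levels as one group via a bitmask: entering the group saves the current level and ungates GFX clock gating, leaving it (or requesting PROFILE_EXIT) restores the saved level and re-gates. A compact stand-alone model of just that enter/exit decision, with made-up level bits rather than the real amd_dpm_forced_level values:

#include <stdio.h>

enum level {
	LEVEL_AUTO		= 1 << 0,
	LEVEL_LOW		= 1 << 1,
	LEVEL_HIGH		= 1 << 2,
	LEVEL_MANUAL		= 1 << 3,
	LEVEL_PROFILE_STANDARD	= 1 << 4,
	LEVEL_PROFILE_MIN_SCLK	= 1 << 5,
	LEVEL_PROFILE_PEAK	= 1 << 6,
	LEVEL_PROFILE_EXIT	= 1 << 7,
};

#define PROFILE_MASK \
	(LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK | LEVEL_PROFILE_PEAK)

struct dpm_state {
	enum level current;
	enum level saved;
	int gfx_cg_enabled;
};

static void force_level(struct dpm_state *s, enum level level)
{
	if (level == s->current)
		return;

	if (!(s->current & PROFILE_MASK)) {
		/* entering profile mode: remember where we were, ungate cg */
		if (level & PROFILE_MASK) {
			s->saved = s->current;
			s->gfx_cg_enabled = 0;
		}
	} else if (!(level & PROFILE_MASK)) {
		/* leaving profile mode: restore level, re-enable cg */
		if (level == LEVEL_PROFILE_EXIT)
			level = s->saved;
		s->gfx_cg_enabled = 1;
	}

	s->current = level;
	printf("level=0x%x cg=%d\n", s->current, s->gfx_cg_enabled);
}

int main(void)
{
	struct dpm_state s = { .current = LEVEL_AUTO, .gfx_cg_enabled = 1 };

	force_level(&s, LEVEL_PROFILE_PEAK);	/* enter profile mode */
	force_level(&s, LEVEL_PROFILE_EXIT);	/* back to LEVEL_AUTO */
	return 0;
}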
@@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
return vddci_table->entries[i].value;
}

PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
return vddci_table->entries[i-1].value);
pr_debug("vddci is larger than max value in vddci_table\n");
return vddci_table->entries[i-1].value;
}

int phm_find_boot_level(void *table,

@@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk)
{
uint8_t entryId;
uint8_t voltageId;
uint8_t entry_id;
uint8_t voltage_id;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);

PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);

/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}

PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
"Can't find requested voltage id in vdd_dep_on_sclk table!",
return -EINVAL;
);
if (entry_id >= table_info->vdd_dep_on_sclk->count) {
pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
return -EINVAL;
}

*sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

return 0;
}
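phm_find_closest_vddci walks the voltage table looking for the first entry at or above the requested VDDCI; the hunk above replaces the assert with a pr_debug while keeping the fall-back to the largest entry when nothing matches. The same "first entry >= target, else clamp to the top" lookup over a plain array, as a stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

/* return the first table value >= target; clamp to the largest otherwise */
static uint16_t closest_vddci(const uint16_t *table, int count, uint16_t target)
{
	int i;

	for (i = 0; i < count; i++)
		if (target <= table[i])
			return table[i];

	fprintf(stderr, "vddci %u larger than max value in table\n", target);
	return table[count - 1];
}

int main(void)
{
	const uint16_t vddci[] = { 800, 850, 900, 950, 1000 }; /* mV, ascending */

	printf("%u\n", closest_vddci(vddci, 5, 870));	/* -> 900 */
	printf("%u\n", closest_vddci(vddci, 5, 1200));	/* -> 1000, with a warning */
	return 0;
}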
@@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
}
} else if (voltage_mode == VOLTAGE_OBJ_SVID2) {
voltage_table->psi1_enable =
voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1;
(voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5;
voltage_table->psi0_enable =
voltage_object->svid2_voltage_obj.psi0_enable & 0x1;
voltage_table->max_vid_step =
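The pp_atomfwctrl hunk changes how psi1_enable is derived from the SVID2 loadline byte: instead of bit 0, it now reads bit 5 (`& 0x20`, shifted down). A two-line illustration of the difference, with a hypothetical ATOM value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t loadline_psi1 = 0x20;	/* hypothetical value: only bit 5 set */

	uint8_t psi1_bit0 = loadline_psi1 & 0x1;		/* old read: 0 */
	uint8_t psi1_bit5 = (loadline_psi1 & 0x20) >> 5;	/* new read: 1 */

	printf("bit0=%u bit5=%u\n", psi1_bit0, psi1_bit5);
	return 0;
}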
@@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
}
} */

if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSoftMinVcn,
(rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
}

if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,

@@ -280,6 +280,8 @@ struct rv_hwmgr {
uint32_t f_actual_hard_min_freq;
uint32_t fabric_actual_soft_min_freq;
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;

bool vcn_power_gated;
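rv_tf_set_clock_limit converts the arbiter's UVD soft minimums to 100 MHz units and hands them to the SMU in a single 32-bit PPSMC_MSG_SetSoftMinVcn argument, one clock per 16-bit half (the hunk as shown packs vclk_soft_min into both halves). A stand-alone sketch of the pack/unpack encoding, packing vclk high and dclk low purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* pack two 16-bit clock fields (in 100 MHz units) into one message argument */
static uint32_t pack_vcn_clocks(uint32_t vclk_mhz, uint32_t dclk_mhz)
{
	uint32_t vclk = vclk_mhz / 100;
	uint32_t dclk = dclk_mhz / 100;

	return (vclk << 16) | (dclk & 0xffff);
}

int main(void)
{
	uint32_t arg = pack_vcn_clocks(600, 400);

	printf("arg=0x%08x vclk=%u dclk=%u\n",
	       arg, arg >> 16, arg & 0xffff);	/* 6 and 4 */
	return 0;
}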
@@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
break;
default:
PP_ASSERT_WITH_CODE(0,
"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
);
break;
}
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
@@ -2313,7 +2313,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);

smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);;
vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);

if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)

@@ -2522,6 +2522,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);

data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;

if (data->registry_data.ulv_support &&
table_info->us_ulv_voltage_offset) {
result = vega10_populate_ulv_state(hwmgr);

@@ -3701,10 +3704,22 @@ static void vega10_apply_dal_minimum_voltage_request(
return;
}

static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);

vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;

return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
}

static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
uint32_t socclk_idx;

vega10_apply_dal_minimum_voltage_request(hwmgr);

@@ -3725,13 +3740,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level),
"Failed to set soft min mclk index!",
return -EINVAL);

hwmgr->smumgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
socclk_idx),
"Failed to set soft min uclk index!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level),
"Failed to set soft min uclk index!",
return -EINVAL);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
}

@@ -4138,7 +4162,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
} else {
pr_info("Cannot find requested DCEFCLK!");
pr_debug("Cannot find requested DCEFCLK!");
}

if (min_clocks.memoryClock != 0) {
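The vega10_upload_dpm_bootup_level change adds a special case: when the memory boot level is the top UCLK DPM level, the SOCCLK soft minimum is raised as well, using the voltage index of that top UCLK entry plus one to pick the SOCCLK level. A tiny model of that index choice, with a made-up vddInd table:

#include <stdio.h>

#define NUM_UCLK_DPM_LEVELS 4

int main(void)
{
	/* hypothetical vddInd per UCLK level, lowest to highest */
	int uclk_vdd_ind[NUM_UCLK_DPM_LEVELS] = { 0, 1, 2, 3 };
	int mem_boot_level = NUM_UCLK_DPM_LEVELS - 1;

	if (mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
		int socclk_idx = uclk_vdd_ind[NUM_UCLK_DPM_LEVELS - 1] + 1;

		printf("soft-min uclk index %d, soft-min socclk index %d\n",
		       mem_boot_level, socclk_idx);
	} else {
		printf("soft-min uclk index %d only\n", mem_boot_level);
	}
	return 0;
}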
@@ -543,7 +543,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 */
/* SQ */
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },

@@ -556,7 +556,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
{ ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },

/* TD */
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
{ ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },

@@ -1208,7 +1208,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
if (0 != result)
return result;

vega10_didt_set_mask(hwmgr, true);
vega10_didt_set_mask(hwmgr, false);

cgs_enter_safe_mode(hwmgr->device, false);
@@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
result = vega10_fan_ctrl_set_static_mode(hwmgr,
FDO_PWM_MODE_STATIC);
if (!result)
result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = vega10_fan_ctrl_set_default_mode(hwmgr);

@@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}

return 0;
@@ -128,6 +128,8 @@ struct phm_uvd_arbiter {
uint32_t dclk;
uint32_t vclk_ceiling;
uint32_t dclk_ceiling;
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
};

struct phm_vce_arbiter {
@@ -66,7 +66,12 @@
#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22
#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24
#define PPSMC_Message_Count 0x25
#define PPSMC_MSG_ForcePowerDownGfx 0x25
#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
#define PPSMC_MSG_SetSoftMinVcn 0x28
#define PPSMC_Message_Count 0x29

typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
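The header hunk appends four new message IDs and bumps PPSMC_Message_Count to match; any ID the driver sends should stay below that count. A small stand-alone sketch of validating an ID against the table bound before handing it to a (here hypothetical) mailbox send:

#include <stdio.h>

#define PPSMC_MSG_ForcePowerDownGfx       0x25
#define PPSMC_MSG_SetPhyclkVoltageByFreq  0x26
#define PPSMC_MSG_SetDppclkVoltageByFreq  0x27
#define PPSMC_MSG_SetSoftMinVcn           0x28
#define PPSMC_Message_Count               0x29

/* hypothetical guard before handing a message id to the SMU mailbox */
static int send_msg_checked(int msg_id, unsigned int arg)
{
	if (msg_id <= 0 || msg_id >= PPSMC_Message_Count) {
		fprintf(stderr, "invalid PPSMC message 0x%x\n", msg_id);
		return -1;
	}
	printf("send 0x%x arg 0x%x\n", msg_id, arg);
	return 0;
}

int main(void)
{
	send_msg_checked(PPSMC_MSG_SetSoftMinVcn, (6 << 16) | 4);
	send_msg_checked(0x40, 0);	/* rejected: beyond Message_Count */
	return 0;
}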
@@ -464,7 +464,7 @@ struct radeon_bo_list {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned preferred_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
};

@@ -2327,7 +2327,7 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
struct radeon_bo *stolen_vga_memory;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -351,7 +351,7 @@ out:
 * handles it.
 * Returns NOTIFY code
 */
int radeon_atif_handler(struct radeon_device *rdev,
static int radeon_atif_handler(struct radeon_device *rdev,
struct acpi_bus_event *event)
{
struct radeon_atif *atif = &rdev->atif;

@@ -27,9 +27,6 @@
struct radeon_device;
struct acpi_bus_event;

int radeon_atif_handler(struct radeon_device *rdev,
struct acpi_bus_event *event);

/* AMD hw uses four ACPI control methods:
 * 1. ATIF
 * ARG0: (ACPI_INTEGER) function code
@@ -130,7 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->rdev->family == CHIP_RS880)) {

/* TODO: is this still needed for NI+ ? */
p->relocs[i].prefered_domains =
p->relocs[i].preferred_domains =
RADEON_GEM_DOMAIN_VRAM;

p->relocs[i].allowed_domains =

@@ -148,14 +148,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -EINVAL;
}

p->relocs[i].prefered_domains = domain;
p->relocs[i].preferred_domains = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].allowed_domains = domain;
}

if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
uint32_t domain = p->relocs[i].prefered_domains;
uint32_t domain = p->relocs[i].preferred_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
"allowed for userptr BOs\n");

@@ -163,7 +163,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
need_mmap_lock = true;
domain = RADEON_GEM_DOMAIN_GTT;
p->relocs[i].prefered_domains = domain;
p->relocs[i].preferred_domains = domain;
p->relocs[i].allowed_domains = domain;
}

@@ -437,7 +437,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;

drm_gem_object_unreference_unlocked(&bo->gem_base);
drm_gem_object_put_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
@@ -307,7 +307,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
if (ret != 0) {
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_put_unlocked(obj);
return ret;
}
/* Only 27 bit offset for legacy cursor */

@@ -317,7 +317,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_bo_unreserve(robj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_put_unlocked(obj);
return ret;
}

@@ -352,7 +352,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
drm_gem_object_put_unlocked(radeon_crtc->cursor_bo);
}

radeon_crtc->cursor_bo = obj;
@@ -267,7 +267,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");

drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
kfree(work);
}

@@ -504,7 +504,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
obj = old_radeon_fb->obj;

/* take a reference to the old object */
drm_gem_object_reference(obj);
drm_gem_object_get(obj);
work->old_rbo = gem_to_radeon_bo(obj);

new_radeon_fb = to_radeon_framebuffer(fb);

@@ -603,7 +603,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);

cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
dma_fence_put(work->fence);
kfree(work);
return r;

@@ -1288,7 +1288,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_gem_object_put_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}

@@ -1348,14 +1348,14 @@ radeon_user_framebuffer_create(struct drm_device *dev,
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}

ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
drm_gem_object_unreference_unlocked(obj);
drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -118,7 +118,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
}

static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,

@@ -299,7 +299,7 @@ out:
}
if (fb && ret) {
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);

@@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
if (r)
goto handle_lockup;

@@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;

release_object:
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);

handle_lockup:
up_read(&rdev->exclusive_lock);

@@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;

@@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
}
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return 0;
}

@@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}

@@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}

@@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}

@@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}

@@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
r = radeon_bo_reserve(rbo, false);
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
args->operation = RADEON_VA_RESULT_ERROR;
radeon_bo_unreserve(rbo);
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return -ENOENT;
}

@@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}

@@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
radeon_bo_unreserve(robj);
out:
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
return r;
}

@@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
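These radeon hunks are a mechanical rename from drm_gem_object_reference/unreference_unlocked to the drm_gem_object_get/put_unlocked helpers; the shape of every ioctl stays the same: look up the object (which holds a reference), use it, and drop the reference on every exit path. A stand-alone refcount model of that lookup/put discipline, with toy gem_get()/gem_put() helpers instead of the real DRM API:

#include <stdio.h>

struct gem_object {
	int refcount;
};

/* stand-ins for drm_gem_object_get()/drm_gem_object_put_unlocked() */
static void gem_get(struct gem_object *obj) { obj->refcount++; }
static void gem_put(struct gem_object *obj) { obj->refcount--; }

/* typical ioctl shape: lookup takes a ref, every return path drops it */
static int some_ioctl(struct gem_object *obj, int fail)
{
	gem_get(obj);		/* "lookup" reference */

	if (fail) {
		gem_put(obj);
		return -22;	/* -EINVAL */
	}

	/* ... use the object ... */

	gem_put(obj);
	return 0;
}

int main(void)
{
	struct gem_object obj = { .refcount = 1 };

	some_ioctl(&obj, 0);
	some_ioctl(&obj, 1);
	printf("refcount back to %d\n", obj.refcount);	/* 1 */
	return 0;
}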
@@ -445,7 +445,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
drm_gem_object_unreference_unlocked(&bo->gem_base);
drm_gem_object_put_unlocked(&bo->gem_base);
}
}

@@ -546,7 +546,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
@@ -907,17 +907,17 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->stollen_vga_memory);
NULL, &rdev->stolen_vga_memory);
if (r) {
return r;
}
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r)
return r;
r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_bo_unreserve(rdev->stollen_vga_memory);
r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_bo_unreserve(rdev->stolen_vga_memory);
if (r) {
radeon_bo_unref(&rdev->stollen_vga_memory);
radeon_bo_unref(&rdev->stolen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",

@@ -946,13 +946,13 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
radeon_ttm_debugfs_fini(rdev);
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (rdev->stolen_vga_memory) {
r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r == 0) {
radeon_bo_unpin(rdev->stollen_vga_memory);
radeon_bo_unreserve(rdev->stollen_vga_memory);
radeon_bo_unpin(rdev->stolen_vga_memory);
radeon_bo_unreserve(rdev->stolen_vga_memory);
}
radeon_bo_unref(&rdev->stollen_vga_memory);
radeon_bo_unref(&rdev->stolen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
@@ -139,7 +139,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;

@@ -151,7 +151,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
continue;

list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;