drm/amdgpu: Get DRM dev from adev by inline-f
Add a static inline adev_to_drm() to obtain the DRM device pointer from an amdgpu_device pointer.

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 4a580877bd
parent 1348969ab6
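For reference, the pattern this patch applies is sketched below. The helper simply wraps the existing adev->ddev field, and call sites that used to dereference adev->ddev go through it instead. This is an illustrative, condensed sketch; the example_runtime_get() wrapper is hypothetical and not taken from any file in the diff.

/* New accessor in the amdgpu header: map an amdgpu_device back to its DRM device. */
static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return adev->ddev;
}

/* Typical call-site conversion (illustrative wrapper, not part of the patch): */
static void example_runtime_get(struct amdgpu_device *adev)
{
	/* before: pm_runtime_get_sync(adev->ddev->dev); */
	pm_runtime_get_sync(adev_to_drm(adev)->dev);
}

Keeping the dereference behind one inline function means a later change to how the DRM device is stored (for example, embedding it rather than pointing to it) only has to touch adev_to_drm(), not every caller.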
@@ -991,6 +991,11 @@ static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
 	return ddev->dev_private;
 }
 
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+	return adev->ddev;
+}
+
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 {
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
@@ -463,11 +463,11 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
-	pm_runtime_get_sync(adev->ddev->dev);
+	pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	drm_helper_hpd_irq_event(adev->ddev);
+	drm_helper_hpd_irq_event(adev_to_drm(adev));
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -817,7 +817,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
-	list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+	list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
@@ -119,7 +119,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
-	.drm_render_minor = adev->ddev->render->index,
+	.drm_render_minor = adev_to_drm(adev)->render->index,

@@ -160,7 +160,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
-	kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+	kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
@@ -479,7 +479,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
-	if (obj->dev->driver != adev->ddev->driver)
+	if (obj->dev->driver != adev_to_drm(adev)->driver)
@@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
-	adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+	adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);

@@ -541,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
-	amdgpu_link_encoder_connector(adev->ddev);
+	amdgpu_link_encoder_connector(adev_to_drm(adev));

@@ -1995,7 +1995,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
-	atom_card_info->dev = adev->ddev;
+	atom_card_info->dev = adev_to_drm(adev);
@@ -1521,7 +1521,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
@@ -69,8 +69,8 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 	drm_debugfs_create_files(files, nfiles,
-				 adev->ddev->primary->debugfs_root,
+				 adev_to_drm(adev)->primary->debugfs_root,
-				 adev->ddev->primary);
+				 adev_to_drm(adev)->primary);

@@ -151,7 +151,7 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
 	debugfs_create_file("amdgpu_autodump", 0600,
-		adev->ddev->primary->debugfs_root,
+		adev_to_drm(adev)->primary->debugfs_root,
 		adev, &autodump_debug_fops);

@@ -227,23 +227,23 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -292,8 +292,8 @@ end:
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -340,15 +340,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -358,8 +358,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -370,8 +370,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -399,15 +399,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -416,8 +416,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -430,8 +430,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -459,15 +459,15 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -477,8 +477,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -489,8 +489,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -518,15 +518,15 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -535,8 +535,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -549,8 +549,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -578,15 +578,15 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -596,8 +596,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -608,8 +608,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -637,15 +637,15 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -654,8 +654,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -668,8 +668,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -796,22 +796,22 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -878,15 +878,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -901,8 +901,8 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -976,7 +976,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);

@@ -999,8 +999,8 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1022,7 +1022,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 err:
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -1047,9 +1047,9 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1058,8 +1058,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1071,8 +1071,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1096,7 +1096,7 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
-	r = pm_runtime_get_sync(adev->ddev->dev);
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);

@@ -1105,15 +1105,15 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1123,8 +1123,8 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -1216,7 +1216,7 @@ static const char *debugfs_regs_names[] = {
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
-	struct drm_minor *minor = adev->ddev->primary;
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
 	struct dentry *ent, *root = minor->debugfs_root;

@@ -1241,7 +1241,7 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1302,7 +1302,7 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1323,7 +1323,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev->ddev->dev);
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -1527,9 +1527,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
-	ret = pm_runtime_get_sync(adev->ddev->dev);
+	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1542,8 +1542,8 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
-	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-	pm_runtime_put_autosuspend(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

@@ -1563,7 +1563,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
-				    adev->ddev->primary->debugfs_root, adev,
+				    adev_to_drm(adev)->primary->debugfs_root, adev,
 				    &fops_ib_preempt);

@@ -1572,7 +1572,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	adev->smu.debugfs_sclk =
 		debugfs_create_file("amdgpu_force_sclk", 0200,
-				    adev->ddev->primary->debugfs_root, adev,
+				    adev_to_drm(adev)->primary->debugfs_root, adev,
 				    &fops_sclk_set);
@@ -1509,7 +1509,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
-	struct drm_device *ddev = adev->ddev;
+	struct drm_device *ddev = adev_to_drm(adev);

@@ -2872,13 +2872,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
-	adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+	adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
-	adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+	adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));

@@ -2894,7 +2894,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 	DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
-		 adev->asic_reset_res, adev->ddev->unique);
+		 adev->asic_reset_res, adev_to_drm(adev)->unique);

@@ -3243,7 +3243,7 @@ fence_driver_init:
-	drm_mode_config_init(adev->ddev);
+	drm_mode_config_init(adev_to_drm(adev));

@@ -3385,9 +3385,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
-	drm_helper_force_disable_all(adev->ddev);
+	drm_helper_force_disable_all(adev_to_drm(adev));
-	drm_atomic_helper_shutdown(adev->ddev);
+	drm_atomic_helper_shutdown(adev_to_drm(adev));

@@ -3411,7 +3411,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
-	if (amdgpu_device_supports_boco(adev->ddev))
+	if (amdgpu_device_supports_boco(adev_to_drm(adev)))

@@ -4079,7 +4079,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 	dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
-		r, tmp_adev->ddev->unique);
+		r, adev_to_drm(tmp_adev)->unique);

@@ -4452,7 +4452,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
 	dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
-		r, tmp_adev->ddev->unique);
+		r, adev_to_drm(tmp_adev)->unique);

@@ -4488,7 +4488,7 @@ skip_hw_reset:
-	drm_helper_resume_force_mode(tmp_adev->ddev);
+	drm_helper_resume_force_mode(adev_to_drm(tmp_adev));

@@ -4665,7 +4665,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
-	if (!amdgpu_device_supports_baco(adev->ddev))
+	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))

@@ -4680,7 +4680,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
-	if (!amdgpu_device_supports_baco(adev->ddev))
+	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
@@ -93,7 +93,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
-	(amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+	(amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,

@@ -619,51 +619,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 	adev->mode_info.coherent_mode_property =
-		drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
 	adev->mode_info.load_detect_property =
-		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
-	drm_mode_create_scaling_mode_property(adev->ddev);
+	drm_mode_create_scaling_mode_property(adev_to_drm(adev));
 	adev->mode_info.underscan_property =
-		drm_property_create_enum(adev->ddev, 0,
+		drm_property_create_enum(adev_to_drm(adev), 0,
 					 "underscan",
 	adev->mode_info.underscan_hborder_property =
-		drm_property_create_range(adev->ddev, 0,
+		drm_property_create_range(adev_to_drm(adev), 0,
 					  "underscan hborder", 0, 128);
 	adev->mode_info.underscan_vborder_property =
-		drm_property_create_range(adev->ddev, 0,
+		drm_property_create_range(adev_to_drm(adev), 0,
 					  "underscan vborder", 0, 128);
 	adev->mode_info.audio_property =
-		drm_property_create_enum(adev->ddev, 0,
+		drm_property_create_enum(adev_to_drm(adev), 0,
 					 "audio",
 	adev->mode_info.dither_property =
-		drm_property_create_enum(adev->ddev, 0,
+		drm_property_create_enum(adev_to_drm(adev), 0,
 					 "dither",
 	adev->mode_info.abm_level_property =
-		drm_property_create_range(adev->ddev, 0,
+		drm_property_create_range(adev_to_drm(adev), 0,
 					  "abm level", 0, 4);
@@ -135,7 +135,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
-	info = drm_get_format_info(adev->ddev, mode_cmd);
+	info = drm_get_format_info(adev_to_drm(adev), mode_cmd);

@@ -231,7 +231,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
-	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+	ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,

@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
-	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;

@@ -270,7 +270,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
-	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+	vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);

@@ -318,7 +318,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
-	if (list_empty(&adev->ddev->mode_config.connector_list))
+	if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))

@@ -332,10 +332,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
-	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
+	drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
-	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
+	ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);

@@ -343,7 +343,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
-	drm_helper_disable_unused_functions(adev->ddev);
+	drm_helper_disable_unused_functions(adev_to_drm(adev));

@@ -354,7 +354,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
-	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+	amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
|
@ -155,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
|
||||||
seq);
|
seq);
|
||||||
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
|
||||||
seq, flags | AMDGPU_FENCE_FLAG_INT);
|
seq, flags | AMDGPU_FENCE_FLAG_INT);
|
||||||
pm_runtime_get_noresume(adev->ddev->dev);
|
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
|
||||||
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
|
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
|
||||||
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
|
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
|
||||||
struct dma_fence *old;
|
struct dma_fence *old;
|
||||||
|
@ -284,8 +284,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
|
||||||
BUG();
|
BUG();
|
||||||
|
|
||||||
dma_fence_put(fence);
|
dma_fence_put(fence);
|
||||||
pm_runtime_mark_last_busy(adev->ddev->dev);
|
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||||
pm_runtime_put_autosuspend(adev->ddev->dev);
|
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||||
} while (last_seq != seq);
|
} while (last_seq != seq);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -93,7 +93,7 @@ retry:
|
||||||
|
|
||||||
void amdgpu_gem_force_release(struct amdgpu_device *adev)
|
void amdgpu_gem_force_release(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
struct drm_device *ddev = adev->ddev;
|
struct drm_device *ddev = adev_to_drm(adev);
|
||||||
struct drm_file *file;
|
struct drm_file *file;
|
||||||
|
|
||||||
mutex_lock(&ddev->filelist_mutex);
|
mutex_lock(&ddev->filelist_mutex);
|
||||||
|
|
|
@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
|
||||||
const struct amdgpu_i2c_bus_rec *rec,
|
const struct amdgpu_i2c_bus_rec *rec,
|
||||||
const char *name)
|
const char *name)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = adev->ddev;
|
struct drm_device *dev = adev_to_drm(adev);
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
|
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
|
||||||
|
|
|
@ -85,7 +85,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
|
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
|
||||||
hotplug_work);
|
hotplug_work);
|
||||||
struct drm_device *dev = adev->ddev;
|
struct drm_device *dev = adev_to_drm(adev);
|
||||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||||
struct drm_connector *connector;
|
struct drm_connector *connector;
|
||||||
struct drm_connector_list_iter iter;
|
struct drm_connector_list_iter iter;
|
||||||
|
@ -268,9 +268,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||||
if (!adev->enable_virtual_display)
|
if (!adev->enable_virtual_display)
|
||||||
/* Disable vblank IRQs aggressively for power-saving */
|
/* Disable vblank IRQs aggressively for power-saving */
|
||||||
/* XXX: can this be enabled for DC? */
|
/* XXX: can this be enabled for DC? */
|
||||||
adev->ddev->vblank_disable_immediate = true;
|
adev_to_drm(adev)->vblank_disable_immediate = true;
|
||||||
|
|
||||||
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
|
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
|
@ -284,14 +284,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
|
||||||
|
|
||||||
adev->irq.installed = true;
|
adev->irq.installed = true;
|
||||||
/* Use vector 0 for MSI-X */
|
/* Use vector 0 for MSI-X */
|
||||||
r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
|
r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
|
||||||
if (r) {
|
if (r) {
|
||||||
adev->irq.installed = false;
|
adev->irq.installed = false;
|
||||||
if (!amdgpu_device_has_dc_support(adev))
|
if (!amdgpu_device_has_dc_support(adev))
|
||||||
flush_work(&adev->hotplug_work);
|
flush_work(&adev->hotplug_work);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
adev->ddev->max_vblank_count = 0x00ffffff;
|
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
|
||||||
|
|
||||||
DRM_DEBUG("amdgpu: irq initialized.\n");
|
DRM_DEBUG("amdgpu: irq initialized.\n");
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -311,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
|
||||||
unsigned i, j;
|
unsigned i, j;
|
||||||
|
|
||||||
if (adev->irq.installed) {
|
if (adev->irq.installed) {
|
||||||
drm_irq_uninstall(adev->ddev);
|
drm_irq_uninstall(adev_to_drm(adev));
|
||||||
adev->irq.installed = false;
|
adev->irq.installed = false;
|
||||||
if (adev->irq.msi_enabled)
|
if (adev->irq.msi_enabled)
|
||||||
pci_free_irq_vectors(adev->pdev);
|
pci_free_irq_vectors(adev->pdev);
|
||||||
|
@ -522,7 +522,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
|
||||||
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||||
unsigned type)
|
unsigned type)
|
||||||
{
|
{
|
||||||
if (!adev->ddev->irq_enabled)
|
if (!adev_to_drm(adev)->irq_enabled)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
if (type >= src->num_types)
|
if (type >= src->num_types)
|
||||||
|
@ -552,7 +552,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||||
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||||
unsigned type)
|
unsigned type)
|
||||||
{
|
{
|
||||||
if (!adev->ddev->irq_enabled)
|
if (!adev_to_drm(adev)->irq_enabled)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
if (type >= src->num_types)
|
if (type >= src->num_types)
|
||||||
|
@ -583,7 +583,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||||
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
|
||||||
unsigned type)
|
unsigned type)
|
||||||
{
|
{
|
||||||
if (!adev->ddev->irq_enabled)
|
if (!adev_to_drm(adev)->irq_enabled)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (type >= src->num_types)
|
if (type >= src->num_types)
|
||||||
|
|
|
@ -555,7 +555,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
|
||||||
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
|
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
|
||||||
if (bo == NULL)
|
if (bo == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
|
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
|
||||||
INIT_LIST_HEAD(&bo->shadow_list);
|
INIT_LIST_HEAD(&bo->shadow_list);
|
||||||
bo->vm_bo = NULL;
|
bo->vm_bo = NULL;
|
||||||
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
|
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
|
||||||
|
|
|
@ -226,7 +226,7 @@ static int init_pmu_by_type(struct amdgpu_device *adev,
|
||||||
pmu_entry->pmu.attr_groups = attr_groups;
|
pmu_entry->pmu.attr_groups = attr_groups;
|
||||||
pmu_entry->pmu_perf_type = pmu_perf_type;
|
pmu_entry->pmu_perf_type = pmu_perf_type;
|
||||||
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
|
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
|
||||||
pmu_file_prefix, adev->ddev->primary->index);
|
pmu_file_prefix, adev_to_drm(adev)->primary->index);
|
||||||
|
|
||||||
ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
|
ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
|
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
|
||||||
struct ta_rap_shared_memory *rap_shared_mem;
|
struct ta_rap_shared_memory *rap_shared_mem;
|
||||||
struct ta_rap_cmd_output_data *rap_cmd_output;
|
struct ta_rap_cmd_output_data *rap_cmd_output;
|
||||||
struct drm_device *dev = adev->ddev;
|
struct drm_device *dev = adev_to_drm(adev);
|
||||||
uint32_t op;
|
uint32_t op;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -116,7 +116,7 @@ static const struct file_operations amdgpu_rap_debugfs_ops = {
|
||||||
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
|
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
#if defined(CONFIG_DEBUG_FS)
|
#if defined(CONFIG_DEBUG_FS)
|
||||||
struct drm_minor *minor = adev->ddev->primary;
|
struct drm_minor *minor = adev_to_drm(adev)->primary;
|
||||||
|
|
||||||
if (!adev->psp.rap_context.rap_initialized)
|
if (!adev->psp.rap_context.rap_initialized)
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -1197,7 +1197,7 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
|
||||||
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
|
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||||
struct drm_minor *minor = adev->ddev->primary;
|
struct drm_minor *minor = adev_to_drm(adev)->primary;
|
||||||
|
|
||||||
con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
|
con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
|
||||||
debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
|
debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
|
||||||
|
|
|
@@ -420,7 +420,7 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
-	struct drm_minor *minor = adev->ddev->primary;
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

@@ -1921,8 +1921,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
-			       adev->ddev->anon_inode->i_mapping,
-			       adev->ddev->vma_offset_manager,
+			       adev_to_drm(adev)->anon_inode->i_mapping,
+			       adev_to_drm(adev)->vma_offset_manager,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);

@@ -2606,7 +2606,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

-	struct drm_minor *minor = adev->ddev->primary;
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
@@ -45,7 +45,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
	if (adev->mode_info.num_crtc == 0)
		adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
-	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
+	adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}
@@ -294,7 +294,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,


remove_link:
-	sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
@@ -1673,7 +1673,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -328,7 +328,7 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -383,7 +383,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -2701,7 +2701,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;

@@ -2709,8 +2709,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)

	amdgpu_crtc->max_cursor_width = 128;
	amdgpu_crtc->max_cursor_height = 128;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	switch (amdgpu_crtc->crtc_id) {
	case 0:

@@ -2792,24 +2792,24 @@ static int dce_v10_0_sw_init(void *handle)
	if (r)
		return r;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;

-	adev->ddev->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {

@@ -2819,7 +2819,7 @@ static int dce_v10_0_sw_init(void *handle)
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
	else
		return -EINVAL;

@@ -2832,7 +2832,7 @@ static int dce_v10_0_sw_init(void *handle)
	if (r)
		return r;

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;

@@ -2844,13 +2844,13 @@ static int dce_v10_0_sw_fini(void *handle)

	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v10_0_audio_fini(adev);

	dce_v10_0_afmt_fini(adev);

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;

@@ -3157,14 +3157,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
	if (amdgpu_crtc == NULL)
		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

@@ -3176,7 +3176,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

@@ -3245,7 +3245,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);

@@ -3485,7 +3485,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
			      uint32_t supported_device,
			      u16 caps)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

@@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
 */
static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 */
static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -2809,7 +2809,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;

@@ -2817,8 +2817,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)

	amdgpu_crtc->max_cursor_width = 128;
	amdgpu_crtc->max_cursor_height = 128;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	switch (amdgpu_crtc->crtc_id) {
	case 0:

@@ -2913,24 +2913,24 @@ static int dce_v11_0_sw_init(void *handle)
	if (r)
		return r;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;

-	adev->ddev->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;


	/* allocate crtcs */

@@ -2941,7 +2941,7 @@ static int dce_v11_0_sw_init(void *handle)
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
	else
		return -EINVAL;

@@ -2954,7 +2954,7 @@ static int dce_v11_0_sw_init(void *handle)
	if (r)
		return r;

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;

@@ -2966,13 +2966,13 @@ static int dce_v11_0_sw_fini(void *handle)

	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v11_0_audio_fini(adev);

	dce_v11_0_afmt_fini(adev);

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;

@@ -3283,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
	if(amdgpu_crtc == NULL)
		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

@@ -3302,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
	if(works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

@@ -3372,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);

@@ -3611,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
			      uint32_t supported_device,
			      u16 caps)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

@@ -279,7 +279,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -324,7 +324,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -2591,7 +2591,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;

@@ -2599,8 +2599,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

@@ -2669,20 +2669,20 @@ static int dce_v6_0_sw_init(void *handle)

	adev->mode_info.mode_config_initialized = true;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-	adev->ddev->mode_config.async_page_flip = true;
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {

@@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
	else
		return -EINVAL;

@@ -2706,7 +2706,7 @@ static int dce_v6_0_sw_init(void *handle)
	if (r)
		return r;

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

	return r;
}

@@ -2717,12 +2717,12 @@ static int dce_v6_0_sw_fini(void *handle)

	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;

@@ -2967,7 +2967,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;

@@ -3036,14 +3036,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
	if (amdgpu_crtc == NULL)
		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

@@ -3055,7 +3055,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

@@ -3297,7 +3297,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
			      uint32_t supported_device,
			      u16 caps)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

@@ -273,7 +273,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -318,7 +318,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

@@ -2609,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;

@@ -2617,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)

	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

@@ -2689,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle)
	if (r)
		return r;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;

-	adev->ddev->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {

@@ -2716,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle)
	}

	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
	else
		return -EINVAL;

@@ -2729,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle)
	if (r)
		return r;

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;

@@ -2741,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle)

	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v8_0_audio_fini(adev);

	dce_v8_0_afmt_fini(adev);

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;

@@ -3057,7 +3057,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;

@@ -3126,14 +3126,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
	if (amdgpu_crtc == NULL)
		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

@@ -3145,7 +3145,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

@@ -3373,7 +3373,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
			      uint32_t supported_device,
			      u16 caps)
{
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

@@ -235,7 +235,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;

@@ -374,24 +374,24 @@ static int dce_virtual_sw_init(void *handle)
	if (r)
		return r;

-	adev->ddev->max_vblank_count = 0;
+	adev_to_drm(adev)->max_vblank_count = 0;

-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {

@@ -403,7 +403,7 @@ static int dce_virtual_sw_init(void *handle)
			return r;
	}

-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;

@@ -415,9 +415,9 @@ static int dce_virtual_sw_fini(void *handle)

	kfree(adev->mode_info.bios_hardcoded_edid);

-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));

-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
	/* clear crtcs pointer to avoid dce irq finish routine access freed data */
	memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
	adev->mode_info.mode_config_initialized = false;

@@ -602,7 +602,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
	if (!encoder)
		return -ENOMEM;
	encoder->possible_crtcs = 1 << index;
-	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+	drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);
	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);

@@ -613,7 +613,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
	}

	/* add a new connector */
-	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+	drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
	connector->display_info.subpixel_order = SubPixelHorizontalRGB;

@@ -663,14 +663,14 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
	if (amdgpu_crtc == NULL)
		return 0;

-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

@@ -682,7 +682,7 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	amdgpu_bo_unref(&works->old_abo);
@ -268,7 +268,7 @@ static struct amdgpu_crtc *
|
||||||
get_crtc_by_otg_inst(struct amdgpu_device *adev,
|
get_crtc_by_otg_inst(struct amdgpu_device *adev,
|
||||||
int otg_inst)
|
int otg_inst)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = adev->ddev;
|
struct drm_device *dev = adev_to_drm(adev);
|
||||||
struct drm_crtc *crtc;
|
struct drm_crtc *crtc;
|
||||||
struct amdgpu_crtc *amdgpu_crtc;
|
struct amdgpu_crtc *amdgpu_crtc;
|
||||||
|
|
||||||
|
@ -320,7 +320,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&adev->ddev->event_lock, flags);
|
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||||
|
|
||||||
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
|
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
|
||||||
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
|
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
|
||||||
|
@ -328,7 +328,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||||
AMDGPU_FLIP_SUBMITTED,
|
AMDGPU_FLIP_SUBMITTED,
|
||||||
amdgpu_crtc->crtc_id,
|
amdgpu_crtc->crtc_id,
|
||||||
amdgpu_crtc);
|
amdgpu_crtc);
|
||||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -380,7 +380,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||||
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
|
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
|
||||||
e->pipe = amdgpu_crtc->crtc_id;
|
e->pipe = amdgpu_crtc->crtc_id;
|
||||||
|
|
||||||
list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
|
list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
|
||||||
e = NULL;
|
e = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -393,7 +393,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
|
||||||
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
|
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
|
||||||
|
|
||||||
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
|
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
|
||||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
|
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
|
||||||
amdgpu_crtc->crtc_id, amdgpu_crtc,
|
amdgpu_crtc->crtc_id, amdgpu_crtc,
|
||||||
|
@ -429,7 +429,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
|
||||||
/* BTR processing for pre-DCE12 ASICs */
|
/* BTR processing for pre-DCE12 ASICs */
|
||||||
if (acrtc_state->stream &&
|
if (acrtc_state->stream &&
|
||||||
adev->family < AMDGPU_FAMILY_AI) {
|
adev->family < AMDGPU_FAMILY_AI) {
|
||||||
spin_lock_irqsave(&adev->ddev->event_lock, flags);
|
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||||
mod_freesync_handle_v_update(
|
mod_freesync_handle_v_update(
|
||||||
adev->dm.freesync_module,
|
adev->dm.freesync_module,
|
||||||
acrtc_state->stream,
|
acrtc_state->stream,
|
||||||
|
@ -439,7 +439,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
|
||||||
adev->dm.dc,
|
adev->dm.dc,
|
||||||
acrtc_state->stream,
|
acrtc_state->stream,
|
||||||
&acrtc_state->vrr_params.adjust);
|
&acrtc_state->vrr_params.adjust);
|
||||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -489,7 +489,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
|
||||||
if (adev->family < AMDGPU_FAMILY_AI)
|
if (adev->family < AMDGPU_FAMILY_AI)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&adev->ddev->event_lock, flags);
|
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||||
|
|
||||||
if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
|
if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
|
||||||
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
|
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
|
||||||
|
@ -522,7 +522,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
|
||||||
acrtc->pflip_status = AMDGPU_FLIP_NONE;
|
acrtc->pflip_status = AMDGPU_FLIP_NONE;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dm_set_clockgating_state(void *handle,
|
static int dm_set_clockgating_state(void *handle,
|
||||||
|
@ -850,7 +850,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||||
#endif
|
#endif
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
adev->dm.ddev = adev->ddev;
|
adev->dm.ddev = adev_to_drm(adev);
|
||||||
adev->dm.adev = adev;
|
adev->dm.adev = adev;
|
||||||
|
|
||||||
/* Zero all the fields */
|
/* Zero all the fields */
|
||||||
|
@ -986,10 +986,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
|
||||||
/* TODO: Add_display_info? */
|
/* TODO: Add_display_info? */
|
||||||
|
|
||||||
/* TODO use dynamic cursor width */
|
/* TODO use dynamic cursor width */
|
||||||
adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
|
adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
|
||||||
adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
|
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
|
||||||
|
|
||||||
if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
|
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
|
||||||
DRM_ERROR(
|
DRM_ERROR(
|
||||||
"amdgpu: failed to initialize sw for display support.\n");
|
"amdgpu: failed to initialize sw for display support.\n");
|
||||||
goto error;
|
goto error;
|
||||||
|
@ -1390,7 +1390,7 @@ static int dm_late_init(void *handle)
|
||||||
bool ret = true;
|
bool ret = true;
|
||||||
|
|
||||||
if (!adev->dm.fw_dmcu)
|
if (!adev->dm.fw_dmcu)
|
||||||
return detect_mst_link_for_all_connectors(adev->ddev);
|
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
|
||||||
|
|
||||||
dmcu = adev->dm.dc->res_pool->dmcu;
|
dmcu = adev->dm.dc->res_pool->dmcu;
|
||||||
|
|
||||||
|
@ -1420,7 +1420,7 @@ static int dm_late_init(void *handle)
|
||||||
if (!ret)
|
if (!ret)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
return detect_mst_link_for_all_connectors(adev->ddev);
|
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void s3_handle_mst(struct drm_device *dev, bool suspend)
|
static void s3_handle_mst(struct drm_device *dev, bool suspend)
|
||||||
|
@ -1672,9 +1672,9 @@ static int dm_suspend(void *handle)
|
||||||
}
|
}
|
||||||
|
|
||||||
WARN_ON(adev->dm.cached_state);
|
WARN_ON(adev->dm.cached_state);
|
||||||
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
|
adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
|
||||||
|
|
||||||
s3_handle_mst(adev->ddev, true);
|
s3_handle_mst(adev_to_drm(adev), true);
|
||||||
|
|
||||||
amdgpu_dm_irq_suspend(adev);
|
amdgpu_dm_irq_suspend(adev);
|
||||||
|
|
||||||
|
@ -1828,7 +1828,7 @@ cleanup:
|
||||||
static int dm_resume(void *handle)
|
static int dm_resume(void *handle)
|
||||||
{
|
{
|
||||||
struct amdgpu_device *adev = handle;
|
struct amdgpu_device *adev = handle;
|
||||||
struct drm_device *ddev = adev->ddev;
|
struct drm_device *ddev = adev_to_drm(adev);
|
||||||
struct amdgpu_display_manager *dm = &adev->dm;
|
struct amdgpu_display_manager *dm = &adev->dm;
|
||||||
struct amdgpu_dm_connector *aconnector;
|
struct amdgpu_dm_connector *aconnector;
|
||||||
struct drm_connector *connector;
|
struct drm_connector *connector;
|
||||||
|
@ -2443,7 +2443,7 @@ static void handle_hpd_rx_irq(void *param)
|
||||||
|
|
||||||
static void register_hpd_handlers(struct amdgpu_device *adev)
|
static void register_hpd_handlers(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = adev->ddev;
|
struct drm_device *dev = adev_to_drm(adev);
|
||||||
struct drm_connector *connector;
|
struct drm_connector *connector;
|
||||||
struct amdgpu_dm_connector *aconnector;
|
struct amdgpu_dm_connector *aconnector;
|
||||||
const struct dc_link *dc_link;
|
const struct dc_link *dc_link;
|
||||||
|
@ -2874,18 +2874,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
|
||||||
|
|
||||||
adev->mode_info.mode_config_initialized = true;
|
adev->mode_info.mode_config_initialized = true;
|
||||||
|
|
||||||
adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
|
adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
|
||||||
adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
|
adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
|
||||||
|
|
||||||
adev->ddev->mode_config.max_width = 16384;
|
adev_to_drm(adev)->mode_config.max_width = 16384;
|
||||||
adev->ddev->mode_config.max_height = 16384;
|
adev_to_drm(adev)->mode_config.max_height = 16384;
|
||||||
|
|
||||||
adev->ddev->mode_config.preferred_depth = 24;
|
adev_to_drm(adev)->mode_config.preferred_depth = 24;
|
||||||
adev->ddev->mode_config.prefer_shadow = 1;
|
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
|
||||||
/* indicates support for immediate flip */
|
/* indicates support for immediate flip */
|
||||||
adev->ddev->mode_config.async_page_flip = true;
|
adev_to_drm(adev)->mode_config.async_page_flip = true;
|
||||||
|
|
||||||
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
|
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
|
||||||
|
|
||||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||||
if (!state)
|
if (!state)
|
||||||
|
@ -2899,7 +2899,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
|
||||||
|
|
||||||
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
|
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
|
||||||
|
|
||||||
drm_atomic_private_obj_init(adev->ddev,
|
drm_atomic_private_obj_init(adev_to_drm(adev),
|
||||||
&adev->dm.atomic_obj,
|
&adev->dm.atomic_obj,
|
||||||
&state->base,
|
&state->base,
|
||||||
&dm_atomic_state_funcs);
|
&dm_atomic_state_funcs);
|
||||||
|
@ -3065,13 +3065,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
|
||||||
props.type = BACKLIGHT_RAW;
|
props.type = BACKLIGHT_RAW;
|
||||||
|
|
||||||
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
|
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
|
||||||
dm->adev->ddev->primary->index);
|
adev_to_drm(dm->adev)->primary->index);
|
||||||
|
|
||||||
dm->backlight_dev = backlight_device_register(bl_name,
|
dm->backlight_dev = backlight_device_register(bl_name,
|
||||||
dm->adev->ddev->dev,
|
adev_to_drm(dm->adev)->dev,
|
||||||
dm,
|
dm,
|
||||||
&amdgpu_dm_backlight_ops,
|
&amdgpu_dm_backlight_ops,
|
||||||
&props);
|
&props);
|
||||||
|
|
||||||
if (IS_ERR(dm->backlight_dev))
|
if (IS_ERR(dm->backlight_dev))
|
||||||
DRM_ERROR("DM: Backlight registration failed!\n");
|
DRM_ERROR("DM: Backlight registration failed!\n");
|
||||||
|
@ -3395,7 +3395,7 @@ static ssize_t s3_debug_store(struct device *device,
|
||||||
if (ret == 0) {
|
if (ret == 0) {
|
||||||
if (s3_state) {
|
if (s3_state) {
|
||||||
dm_resume(adev);
|
dm_resume(adev);
|
||||||
drm_kms_helper_hotplug_event(adev->ddev);
|
drm_kms_helper_hotplug_event(adev_to_drm(adev));
|
||||||
} else
|
} else
|
||||||
dm_suspend(adev);
|
dm_suspend(adev);
|
||||||
}
|
}
|
||||||
|
@ -3522,7 +3522,7 @@ static int dm_early_init(void *handle)
|
||||||
*/
|
*/
|
||||||
#if defined(CONFIG_DEBUG_KERNEL_DC)
|
#if defined(CONFIG_DEBUG_KERNEL_DC)
|
||||||
device_create_file(
|
device_create_file(
|
||||||
adev->ddev->dev,
|
adev_to_drm(adev)->dev,
|
||||||
&dev_attr_s3_debug);
|
&dev_attr_s3_debug);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -5986,7 +5986,7 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
|
||||||
num_formats = get_plane_formats(plane, plane_cap, formats,
|
num_formats = get_plane_formats(plane, plane_cap, formats,
|
||||||
ARRAY_SIZE(formats));
|
ARRAY_SIZE(formats));
|
||||||
|
|
||||||
res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
|
res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
|
||||||
&dm_plane_funcs, formats, num_formats,
|
&dm_plane_funcs, formats, num_formats,
|
||||||
NULL, plane->type, NULL);
|
NULL, plane->type, NULL);
|
||||||
if (res)
|
if (res)
|
||||||
|
@ -6830,7 +6830,7 @@ static void update_freesync_state_on_stream(
|
||||||
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
|
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&adev->ddev->event_lock, flags);
|
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
|
||||||
vrr_params = new_crtc_state->vrr_params;
|
vrr_params = new_crtc_state->vrr_params;
|
||||||
|
|
||||||
if (surface) {
|
if (surface) {
|
||||||
|
@ -6883,7 +6883,7 @@ static void update_freesync_state_on_stream(
|
||||||
(int)new_crtc_state->base.vrr_enabled,
|
(int)new_crtc_state->base.vrr_enabled,
|
||||||
(int)vrr_params.state);
|
(int)vrr_params.state);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
|
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pre_update_freesync_state_on_stream(
|
static void pre_update_freesync_state_on_stream(
|
||||||
|
@@ -6906,7 +6906,7 @@ static void pre_update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;

-spin_lock_irqsave(&adev->ddev->event_lock, flags);
+spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
vrr_params = new_crtc_state->vrr_params;

if (new_crtc_state->vrr_supported &&

@@ -6929,7 +6929,7 @@ static void pre_update_freesync_state_on_stream(
sizeof(vrr_params.adjust)) != 0);

new_crtc_state->vrr_params = vrr_params;
-spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
@@ -7741,7 +7741,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
*/
-spin_lock_irqsave(&adev->ddev->event_lock, flags);
+spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

if (new_crtc_state->event)

@@ -7749,7 +7749,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)

new_crtc_state->event = NULL;
}
-spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

/* Signal HW programming completion */
drm_atomic_helper_commit_hw_done(state);
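The display-manager hunks above all apply the same mechanical substitution: any code that dereferenced adev->ddev directly now goes through the new adev_to_drm() accessor. A minimal sketch of the resulting usage pattern follows; the helper name example_dm_signal_hotplug is hypothetical and not part of this patch, it only illustrates the event_lock and hotplug-event calls seen in the hunks above.

#include <drm/drm_probe_helper.h>

#include "amdgpu.h"

/* Hypothetical helper, for illustration only. */
static void example_dm_signal_hotplug(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);	/* was: adev->ddev */
	unsigned long flags;

	/* Same event_lock usage as the freesync/commit-tail hunks above. */
	spin_lock_irqsave(&ddev->event_lock, flags);
	/* ... inspect or hand back pending vblank/pageflip events ... */
	spin_unlock_irqrestore(&ddev->event_lock, flags);

	drm_kms_helper_hotplug_event(ddev);
}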
@@ -2194,7 +2194,7 @@ static int force_timing_sync_set(void *data, u64 val)

adev->dm.force_timing_sync = (bool)val;

-amdgpu_dm_trigger_timing_sync(adev->ddev);
+amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));

return 0;
}

@@ -2253,7 +2253,7 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
.llseek = default_llseek
};

-struct drm_minor *minor = adev->ddev->primary;
+struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
int ret;

@@ -719,7 +719,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
*/
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;


@@ -755,7 +755,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
*/
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;

@@ -310,7 +310,7 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
int i;

for (i = 0; i < adev->dm.display_indexes_num; i++) {

@@ -426,7 +426,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
-dm->adev->ddev,
+adev_to_drm(dm->adev),
&aconnector->dm_dp_aux.aux,
16,
4,
@@ -122,7 +122,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,

void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
-struct drm_device *ddev = adev->ddev;
+struct drm_device *ddev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;


@@ -143,7 +143,7 @@ void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)

u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vblank_in_pixels;

@@ -170,7 +170,7 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
-struct drm_device *dev = adev->ddev;
+struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vrefresh = 0;
@@ -1903,7 +1903,7 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);

return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-adev->ddev->unique,
+adev_to_drm(adev)->unique,
atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
adev->throttling_logging_rs.interval / HZ + 1);
}

@@ -2199,9 +2199,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
if (channel >= PP_TEMP_MAX)
return -EINVAL;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2226,8 +2226,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
break;
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -2332,9 +2332,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-ret = pm_runtime_get_sync(adev->ddev->dev);
+ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}


@@ -2342,16 +2342,16 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}

pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return sprintf(buf, "%i\n", pwm_mode);
}
@@ -2372,9 +2372,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;

-ret = pm_runtime_get_sync(adev->ddev->dev);
+ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}


@@ -2382,16 +2382,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
smu_set_fan_control_mode(&adev->smu, value);
} else {
if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}

amdgpu_dpm_set_fan_control_mode(adev, value);
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return count;
}
@@ -2422,9 +2422,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2435,15 +2435,15 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,

if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}

err = kstrtou32(buf, 10, &value);
if (err) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2456,8 +2456,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -2476,9 +2476,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2489,8 +2489,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -2511,9 +2511,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2524,8 +2524,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -2545,17 +2545,17 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}

r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;

@@ -2575,17 +2575,17 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}

r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -2604,9 +2604,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2617,8 +2617,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -2638,9 +2638,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2650,15 +2650,15 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -ENODATA;
}

err = kstrtou32(buf, 10, &value);
if (err) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2669,8 +2669,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -2689,9 +2689,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-ret = pm_runtime_get_sync(adev->ddev->dev);
+ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}


@@ -2699,16 +2699,16 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}

pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -2737,9 +2737,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;

-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2747,15 +2747,15 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
smu_set_fan_control_mode(&adev->smu, pwm_mode);
} else {
if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return count;
}
@@ -2771,9 +2771,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2781,8 +2781,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -2812,9 +2812,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2822,8 +2822,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -2850,9 +2850,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2860,8 +2860,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -2891,9 +2891,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2907,8 +2907,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
size = snprintf(buf, PAGE_SIZE, "\n");
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return size;
}
@@ -2925,9 +2925,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -2941,8 +2941,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
size = snprintf(buf, PAGE_SIZE, "\n");
}

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

return size;
}
@@ -2970,9 +2970,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */


-err = pm_runtime_get_sync(adev->ddev->dev);
+err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}


@@ -2983,8 +2983,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
else
err = -EINVAL;

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (err)
return err;
@@ -3003,9 +3003,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -3013,8 +3013,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
@@ -3040,9 +3040,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;

-r = pm_runtime_get_sync(adev->ddev->dev);
+r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}


@@ -3050,8 +3050,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);

-pm_runtime_mark_last_busy(adev->ddev->dev);
-pm_runtime_put_autosuspend(adev->ddev->dev);
+pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

if (r)
return r;
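The hwmon and debugfs hunks above all follow a single runtime-PM pattern; only the way the struct device is reached changes, from adev->ddev->dev to adev_to_drm(adev)->dev, while the get/put flow stays the same. A condensed sketch of that pattern follows; the helper name example_hwmon_read_sclk is hypothetical and not part of this patch, and the headers listed are assumptions about where the declarations live.

#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_dpm.h"

/* Hypothetical helper mirroring the pattern in the hunks above. */
static int example_hwmon_read_sclk(struct amdgpu_device *adev, uint32_t *sclk)
{
	struct device *dev = adev_to_drm(adev)->dev;	/* was: adev->ddev->dev */
	int size = sizeof(*sclk);
	int r;

	r = pm_runtime_get_sync(dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)sclk, &size);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return r;
}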