drm fixes for 5.3-rc4
core:
- mode parser strncpy fix

i915:
- GLK DSI escape clock setting
- HDCP memleak fix

tegra:
- one gpiod/of regression fix

amdgpu:
- Fixes VCN to handle the latest navi10 firmware
- Fixes for fan control on navi10
- Properly handle SMU metrics table on navi10
- Fix a resume regression on Stoney
- kfd revert a GWS ioctl

vmwgfx:
- memory leak fix

rockchip:
- suspend fix
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdTQ/bAAoJEAx081l5xIa+YAUP/i0sQUbM41u/CijX14TRSEOt
bdv6yIGsCQ4B5qu2LTm/53XzmQzh8ENsN9zDoQYbqerrdRjtcZ1h9Iux/e5a2mzo
+apVXfBo4QFvn7IelTjg/7MAyK7PO9vZmSEC4/xUxetz/LxgTrED3JWUGLUK5SbR
6zBupVPJmsizBWAOhwQFlwoBpDYbVfL5sJMq7NNBCjwrmgJEXsElZC4ZHFaIgXyh
MtWfJtVHJ9211q8gSVoAbjlFyuinn5qejCbx0Bo83CrZN6LNWu6tCaajsWa7QGGo
arxT1xlU2bMI5MpfNfb+jM3rDycR5vB3uO597S3M3gkVrN05ZiG2AhpX4r9ohMYB
qxGR0rLcu7P7CE8V4fDQWjIeaK0fV24waEYNMa0cUA+iilpt43/0DEKBpIR5oL74
nf445bmDSd4JazgxQXExUy+7szoWHJyqVhkpHnLKi1WdgOR2ZEZpQMnZGmg60RfE
QbgpRlszl/i0tBolMvC4Dgha+fyXjdUMEDgXb8lM7BZZFJ+Ew4Oxxq1V7Zy2+sub
XOrWp7oE6tucwd6XabGKDUakhDnxGp2taIXOMu+2K8b/H38sJdlirecrzAQdQfix
YB5FQcu/VqmQ2IdHU2bmXUm0lbd6e4s23JC4uRcnQFelVcrOey9GooauDX7OBgaj
EBMoR1qdJPjFHeQSuYda
=LjgJ
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-08-09' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Usual fixes roundup. Nothing too crazy or serious, one non-released
  ioctl is removed in the amdkfd driver.

  core:
   - mode parser strncpy fix

  i915:
   - GLK DSI escape clock setting
   - HDCP memleak fix

  tegra:
   - one gpiod/of regression fix

  amdgpu:
   - fix VCN to handle the latest navi10 firmware
   - fix for fan control on navi10
   - properly handle SMU metrics table on navi10
   - fix a resume regression on Stoney
   - kfd revert a GWS ioctl

  vmwgfx:
   - memory leak fix

  rockchip:
   - suspend fix"

* tag 'drm-fixes-2019-08-09' of git://anongit.freedesktop.org/drm/drm:
  drm/vmwgfx: fix memory leak when too many retries have occurred
  Revert "drm/amdkfd: New IOCTL to allocate queue GWS"
  Revert "drm/amdgpu: fix transform feedback GDS hang on gfx10 (v2)"
  drm/amdgpu: pin the csb buffer on hw init for gfx v8
  drm/rockchip: Suspend DP late
  drm/i915: Fix wrong escape clock divisor init for GLK
  drm/i915: fix possible memory leak in intel_hdcp_auth_downstream()
  drm/modes: Fix unterminated strncpy
  drm/amd/powerplay: correct navi10 vcn powergate
  drm/amd/powerplay: honor hw limit on fetching metrics data for navi10
  drm/amd/powerplay: Allow changing of fan_control in smu_v11_0
  drm/amd/amdgpu/vcn_v2_0: Move VCN 2.0 specific dec ring test to vcn_v2_0
  drm/amd/amdgpu/vcn_v2_0: Mark RB commands as KMD commands
  drm/tegra: Fix gpiod_get_from_of_node() regression
commit 2226fb57a9
@@ -32,7 +32,6 @@ struct amdgpu_gds {
 	uint32_t gws_size;
 	uint32_t oa_size;
 	uint32_t gds_compute_max_wave_id;
-	uint32_t vgt_gs_max_wave_id;
 };
 
 struct amdgpu_gds_reg_offset {

@@ -30,6 +30,7 @@
 #define AMDGPU_VCN_FIRMWARE_OFFSET	256
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
+#define VCN_DEC_KMD_CMD			0x80000000
 #define VCN_DEC_CMD_FENCE		0x00000000
 #define VCN_DEC_CMD_TRAP		0x00000001
 #define VCN_DEC_CMD_WRITE_REG		0x00000004

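The new VCN_DEC_KMD_CMD define sets the top bit of the command word so the VCN firmware can tell kernel-issued ring commands apart; the vcn_v2_0.c hunks below OR it into every command the driver emits. A minimal standalone sketch of how the words are composed (the define values are copied from the hunk above; everything else is illustrative, not driver API):

#include <stdio.h>
#include <stdint.h>

#define VCN_DEC_KMD_CMD       0x80000000u  /* "issued by the kernel driver" tag bit */
#define VCN_DEC_CMD_FENCE     0x00000000u
#define VCN_DEC_CMD_TRAP      0x00000001u
#define VCN_DEC_CMD_WRITE_REG 0x00000004u

/* Compose a command word the way the vcn_v2_0 hunks below do:
 * shift the command code left by one and OR in the KMD tag. */
static uint32_t kmd_cmd(uint32_t cmd)
{
	return VCN_DEC_KMD_CMD | (cmd << 1);
}

int main(void)
{
	printf("fence: 0x%08x\n", kmd_cmd(VCN_DEC_CMD_FENCE));     /* 0x80000000 */
	printf("trap:  0x%08x\n", kmd_cmd(VCN_DEC_CMD_TRAP));      /* 0x80000002 */
	printf("wreg:  0x%08x\n", kmd_cmd(VCN_DEC_CMD_WRITE_REG)); /* 0x80000008 */
	return 0;
}
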
@@ -4206,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	u32 header, control = 0;
 
-	/* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
-	 * This resets the wave ID counters. (needed by transform feedback)
-	 * TODO: This might only be needed on a VMID switch when we change
-	 * the GDS OA mapping, not sure.
-	 */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
-	amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);
-
 	if (ib->flags & AMDGPU_IB_FLAG_CE)
 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
 	else

@@ -4961,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
 		5 + /* HDP_INVL */
 		8 + 8 + /* FENCE x2 */
 		2, /* SWITCH_BUFFER */
-	.emit_ib_size =	7, /* gfx_v10_0_ring_emit_ib_gfx */
+	.emit_ib_size =	4, /* gfx_v10_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v10_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,

@@ -5112,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
 	default:
 		adev->gds.gds_size = 0x10000;
 		adev->gds.gds_compute_max_wave_id = 0x4ff;
-		adev->gds.vgt_gs_max_wave_id = 0x3ff;
 		break;
 	}
 

@@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+	if (unlikely(r != 0))
+		return r;
+
+	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+			AMDGPU_GEM_DOMAIN_VRAM);
+	if (!r)
+		adev->gfx.rlc.clear_state_gpu_addr =
+			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+	return r;
+}
+
+static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (!adev->gfx.rlc.clear_state_obj)
+		return;
+
+	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+	if (likely(r == 0)) {
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+}
+
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);

@@ -4785,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle)
 	gfx_v8_0_init_golden_registers(adev);
 	gfx_v8_0_constants_init(adev);
 
+	r = gfx_v8_0_csb_vram_pin(adev);
+	if (r)
+		return r;
+
 	r = adev->gfx.rlc.funcs->resume(adev);
 	if (r)
 		return r;

@@ -4901,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle)
 	else
 		pr_err("rlc is busy, skip halt rlc\n");
 	amdgpu_gfx_rlc_exit_safe_mode(adev);
+
+	gfx_v8_0_csb_vram_unpin(adev);
+
 	return 0;
 }
 

@@ -1485,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
 }
 
 /**

@@ -1498,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
 static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
 }
 
 /**

@@ -1543,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0));
 	amdgpu_ring_write(ring, 0);

@@ -1553,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
 }
 
 /**

@@ -1597,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
 }
 
 static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,

@@ -1626,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
 }
 
 /**

@@ -2079,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+	r = amdgpu_ring_alloc(ring, 4);
+	if (r)
+		return r;
+	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0));
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
+	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(adev->vcn.external.scratch9);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+	return r;
+}
+
+
 static int vcn_v2_0_set_powergating_state(void *handle,
 					  enum amd_powergating_state state)
 {

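The ring test above follows the usual amdgpu smoke-test pattern: seed a scratch register with 0xCAFEDEAD, submit a ring packet that writes 0xDEADBEEF back to it, then poll until the magic value appears or a timeout expires. A standalone userspace sketch of the same poll-with-timeout idea (fake_reg and the helpers are stand-ins, not driver API):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static volatile uint32_t fake_reg = 0xCAFEDEAD; /* stands in for the scratch register */

/* Pretend the hardware completes our command after a few polls. */
static void fake_hw_tick(unsigned iteration)
{
	if (iteration == 3)
		fake_reg = 0xDEADBEEF;
}

static int ring_test(unsigned timeout)
{
	unsigned i;

	for (i = 0; i < timeout; i++) {
		if (fake_reg == 0xDEADBEEF)
			return 0;        /* the command reached the hardware */
		fake_hw_tick(i);         /* in the driver this is a 1 us delay */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("ring test: %d\n", ring_test(100)); /* prints 0 on success */
	return 0;
}
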
@@ -2142,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_dec_ring_test_ring,
+	.test_ring = vcn_v2_0_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
 	.insert_start = vcn_v2_0_dec_ring_insert_start,

@@ -1567,32 +1567,6 @@ copy_from_user_failed:
 	return err;
 }
 
-static int kfd_ioctl_alloc_queue_gws(struct file *filep,
-		struct kfd_process *p, void *data)
-{
-	int retval;
-	struct kfd_ioctl_alloc_queue_gws_args *args = data;
-	struct kfd_dev *dev;
-
-	if (!hws_gws_support)
-		return -ENODEV;
-
-	dev = kfd_device_by_id(args->gpu_id);
-	if (!dev) {
-		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
-		return -ENODEV;
-	}
-	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
-		return -ENODEV;
-
-	mutex_lock(&p->mutex);
-	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
-	mutex_unlock(&p->mutex);
-
-	args->first_gws = 0;
-	return retval;
-}
-
 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
 		struct kfd_process *p, void *data)
 {

@@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
 			kfd_ioctl_import_dmabuf, 0),
 
-	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
-			kfd_ioctl_alloc_queue_gws, 0),
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)

@@ -315,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
 			   void *data, uint32_t *size)
 {
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
 	int ret = 0;
 
 	switch (sensor) {

@@ -339,7 +341,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
 		*size = 4;
 		break;
 	default:

@@ -451,6 +451,7 @@ struct smu_dpm_context {
 struct smu_power_gate {
 	bool uvd_gated;
 	bool vce_gated;
+	bool vcn_gated;
 };
 
 struct smu_power_context {

@@ -502,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
 
 static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 {
+	struct smu_table_context *smu_table = &smu->smu_table;
+
 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),

@@ -516,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
 		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
 		       AMDGPU_GEM_DOMAIN_VRAM);
 
+	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+	if (!smu_table->metrics_table)
+		return -ENOMEM;
+	smu_table->metrics_time = 0;
+
 	return 0;
 }
 
+static int navi10_get_metrics_table(struct smu_context *smu,
+				    SmuMetrics_t *metrics_table)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+	int ret = 0;
+
+	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+				(void *)smu_table->metrics_table, false);
+		if (ret) {
+			pr_info("Failed to export SMU metrics table!\n");
+			return ret;
+		}
+		smu_table->metrics_time = jiffies;
+	}
+
+	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+	return ret;
+}
+
 static int navi10_allocate_dpm_context(struct smu_context *smu)
 {
 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

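The helper added above caches the last firmware metrics snapshot and refetches only when the copy is older than roughly a millisecond (HZ / 1000 jiffies), which is how the series honors the hardware limit on metrics polling; all the sensor callbacks below are then switched over to it. A standalone C sketch of the same time-based caching pattern (the clock and the fetch function are stand-ins, not the SMU API):

#include <stdio.h>
#include <string.h>
#include <time.h>

struct metrics { int temperature; int fan_rpm; };

static struct metrics cache;
static double cache_time;            /* 0 means "never fetched" */

static double now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

/* Stand-in for the expensive firmware table read. */
static void fetch_from_hw(struct metrics *m)
{
	m->temperature = 65;
	m->fan_rpm = 1500;
}

/* Re-read the hardware at most once per millisecond; otherwise serve
 * the cached copy, mirroring navi10_get_metrics_table() above. */
static void get_metrics(struct metrics *out)
{
	double t = now_ms();

	if (cache_time == 0 || t - cache_time > 1.0) {
		fetch_from_hw(&cache);
		cache_time = t;
	}
	memcpy(out, &cache, sizeof(cache));
}

int main(void)
{
	struct metrics m;
	get_metrics(&m);   /* first call hits the "hardware" */
	get_metrics(&m);   /* immediate second call is served from the cache */
	printf("%d C, %d rpm\n", m.temperature, m.fan_rpm);
	return 0;
}
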
@@ -577,20 +605,27 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
+	struct smu_power_context *smu_power = &smu->smu_power;
+	struct smu_power_gate *power_gate = &smu_power->power_gate;
 	int ret = 0;
 
 	if (enable) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-		if (ret)
-			return ret;
+		/* vcn dpm on is a prerequisite for vcn power gate messages */
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+			if (ret)
+				return ret;
+		}
+		power_gate->vcn_gated = false;
 	} else {
-		ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-		if (ret)
-			return ret;
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			if (ret)
+				return ret;
+		}
+		power_gate->vcn_gated = true;
 	}
 
-	ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
-
 	return ret;
 }
 

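The reworked enable path only sends the power-up/down messages when the firmware actually advertises VCN power gating, and it records the result in power_gate->vcn_gated so the sensor path earlier in the series can answer without another firmware round trip. A small standalone sketch of that guard-and-track pattern (the feature flag and message helper are illustrative, not the SMU API):

#include <stdio.h>
#include <stdbool.h>

struct power_gate { bool vcn_gated; };

static bool vcn_pg_supported = true;   /* pretend firmware feature bit */
static struct power_gate gate = { .vcn_gated = true };

static int send_power_msg(bool up)
{
	printf("sending Power%sVcn message\n", up ? "Up" : "Down");
	return 0;                          /* pretend the firmware acked */
}

static int set_vcn_enable(bool enable)
{
	int ret = 0;

	if (vcn_pg_supported) {            /* only talk to firmware if PG exists */
		ret = send_power_msg(enable);
		if (ret)
			return ret;
	}
	gate.vcn_gated = !enable;          /* remember the state locally */
	return 0;
}

int main(void)
{
	set_vcn_enable(true);
	/* a sensor read can now report the power state from gate.vcn_gated
	 * without querying the firmware again */
	printf("vcn gated: %d\n", gate.vcn_gated);
	return 0;
}
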
@@ -598,15 +633,10 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 					    enum smu_clk_type clk_type,
 					    uint32_t *value)
 {
-	static SmuMetrics_t metrics;
 	int ret = 0, clk_id = 0;
+	SmuMetrics_t metrics;
 
 	if (!value)
 		return -EINVAL;
 
-	memset(&metrics, 0, sizeof(metrics));
-
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+	ret = navi10_get_metrics_table(smu, &metrics);
 	if (ret)
 		return ret;
 

@@ -894,8 +924,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
 	if (!value)
 		return -EINVAL;
 
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics,
-			       false);
+	ret = navi10_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
 	if (ret)
 		return ret;
 

@@ -914,10 +945,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
 	if (!value)
 		return -EINVAL;
 
-	msleep(1);
-
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
-			       (void *)&metrics, false);
+	ret = navi10_get_metrics_table(smu, &metrics);
 	if (ret)
 		return ret;
 

@@ -956,10 +984,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
 	if (!speed)
 		return -EINVAL;
 
-	memset(&metrics, 0, sizeof(metrics));
-
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
-			       (void *)&metrics, false);
+	ret = navi10_get_metrics_table(smu, &metrics);
+	if (ret)
+		return ret;
 	if (ret)
 		return ret;
 

@@ -1307,7 +1334,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
 	if (!value)
 		return -EINVAL;
 
-	ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
+	ret = navi10_get_metrics_table(smu, &metrics);
 	if (ret)
 		return ret;
 

@@ -1391,7 +1391,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
 {
 	int ret = 0;
 
-	if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
+	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
 		return 0;
 
 	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);

@@ -1770,7 +1770,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	}
 
 	if (named_mode) {
-		strncpy(mode->name, name, mode_end);
+		if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
+			return false;
+		strscpy(mode->name, name, mode_end + 1);
 	} else {
 		ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
 						      parse_extras,

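strncpy() does not write a terminating NUL when the source is at least as long as the size argument, so the old code could leave mode->name unterminated; strscpy() always terminates, and the added length check rejects names that would not fit. A standalone illustration of the failure mode and the safe pattern (plain C, since strscpy is kernel-only):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	const char *name = "0123456789";

	/* strncpy fills exactly 8 bytes and never writes a NUL here, so a
	 * later printf("%s", buf) would read past the end of the array. */
	strncpy(buf, name, sizeof(buf));
	printf("terminated? %s\n", memchr(buf, '\0', sizeof(buf)) ? "yes" : "no");

	/* the safe pattern: refuse names that don't fit, then copy with an
	 * explicit terminator -- the guarantee strscpy provides */
	size_t len = strlen(name);
	if (len + 1 > sizeof(buf)) {
		puts("name too long, rejected");
		return 1;
	}
	memcpy(buf, name, len + 1);
	return 0;
}
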
@@ -536,7 +536,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 
 	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
 		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
-		return -EPERM;
+		ret = -EPERM;
+		goto err;
 	}
 
 	/*

@@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
 	else
 		txesc2_div = 10;
 
-	I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
-	I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+	I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
 }
 
 /* Program BXT Mipi clocks and dividers */

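As this fix reads, the GLK TXESC divider fields take a shift encoding rather than the plain divisor: a divide-by-N is programmed as 1 << (N - 1), so writing the raw value only happened to coincide for N = 1 and N = 2. A quick standalone check of where the two encodings agree:

#include <stdio.h>

int main(void)
{
	/* raw divisor vs. the 1 << (div - 1) encoding the fix programs */
	for (int div = 1; div <= 10; div++)
		printf("div %2d: raw 0x%03x  encoded 0x%03x%s\n",
		       div, div, 1 << (div - 1),
		       div == (1 << (div - 1)) ? "  (same)" : "");
	return 0;
}
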
@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-	.suspend = rockchip_dp_suspend,
+	.suspend_late = rockchip_dp_suspend,
 	.resume_early = rockchip_dp_resume,
 #endif
 };

@@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output)
 						       "nvidia,hpd-gpio", 0,
 						       GPIOD_IN,
 						       "HDMI hotplug detect");
-	if (IS_ERR(output->hpd_gpio))
-		return PTR_ERR(output->hpd_gpio);
+	if (IS_ERR(output->hpd_gpio)) {
+		if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+			return PTR_ERR(output->hpd_gpio);
+
+		output->hpd_gpio = NULL;
+	}
 
 	if (output->hpd_gpio) {
 		err = gpiod_to_irq(output->hpd_gpio);

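The regression was that gpiod_get_from_of_node() reports a missing (optional) hotplug GPIO as ERR_PTR(-ENOENT), and the old check turned that into a probe failure; the fix treats -ENOENT as "no GPIO wired up" and keeps NULL as the sentinel for absence. A standalone sketch of the optional-resource idiom (the lookup function here is made up for illustration):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct gpio { int line; };

/* Made-up lookup: returns a handle, or NULL with *err set. */
static struct gpio *lookup_gpio(const char *name, int *err)
{
	*err = -ENOENT;            /* pretend the property is simply absent */
	return NULL;
}

int main(void)
{
	int err;
	struct gpio *hpd = lookup_gpio("hpd", &err);

	if (!hpd) {
		if (err != -ENOENT) {
			fprintf(stderr, "probe failed: %d\n", err);
			return 1;  /* a real error still fails the probe */
		}
		/* -ENOENT: the GPIO is optional, carry on without it */
	}

	if (hpd)
		printf("using hotplug interrupt\n");
	else
		printf("no hpd gpio, falling back to polling\n");
	return 0;
}
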
@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		break;
 	}
 
-	if (retries == RETRIES)
+	if (retries == RETRIES) {
+		kfree(reply);
 		return -EINVAL;
+	}
 
 	*msg_len = reply_len;
 	*msg = reply;

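The leak: when every retry failed, the function bailed out without freeing the reply buffer allocated inside the loop; ownership only transfers to the caller on the success path via *msg. A minimal userspace reproduction of the pattern and its fix (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Try to "receive" a message; on success the caller owns *msg. */
static int recv_msg(char **msg, int attempts_that_fail)
{
	char *reply = NULL;

	for (int retries = 0; retries < 3; retries++) {
		free(reply);                   /* drop the previous attempt */
		reply = malloc(64);
		if (!reply)
			return -1;
		if (retries >= attempts_that_fail) {
			strcpy(reply, "ok");
			*msg = reply;          /* success: ownership moves out */
			return 0;
		}
	}

	free(reply);  /* the fix: all retries failed, nothing owns reply */
	return -1;
}

int main(void)
{
	char *msg;
	if (recv_msg(&msg, 1) == 0) {
		puts(msg);
		free(msg);
	}
	return 0;
}
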
@@ -410,21 +410,6 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 	__u32 n_success;		/* to/from KFD */
 };
 
-/* Allocate GWS for specific queue
- *
- * @gpu_id:      device identifier
- * @queue_id:    queue's id that GWS is allocated for
- * @num_gws:     how many GWS to allocate
- * @first_gws:   index of the first GWS allocated.
- *               only support contiguous GWS allocation
- */
-struct kfd_ioctl_alloc_queue_gws_args {
-	__u32 gpu_id;		/* to KFD */
-	__u32 queue_id;		/* to KFD */
-	__u32 num_gws;		/* to KFD */
-	__u32 first_gws;	/* from KFD */
-};
-
 struct kfd_ioctl_get_dmabuf_info_args {
 	__u64 size;		/* from KFD */
 	__u64 metadata_ptr;	/* to KFD */

@@ -544,10 +529,7 @@ enum kfd_mmio_remap {
 #define AMDKFD_IOC_IMPORT_DMABUF		\
 		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
 
-#define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
-		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
-
 #define AMDKFD_COMMAND_START		0x01
-#define AMDKFD_COMMAND_END		0x1F
+#define AMDKFD_COMMAND_END		0x1E
 
 #endif