drm/i915/pcode: rename sandybridge_pcode_* to snb_pcode_*
Prefer acronym-based naming to be in line with the rest of the driver.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220112111740.1208374-1-jani.nikula@intel.com
commit 6650ebcbea
parent 2616be2eac
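The rename is mechanical: callers keep the same mailbox commands, arguments and error handling, and only the helper name changes. As an illustrative sketch (not part of the diff, call site abbreviated from the gen6_rc6_enable() hunk below):

	/* before */
	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
	/* after: only the function name changes */
	ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);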
@@ -75,10 +75,9 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
 	u16 dclk;
 	int ret;
 
-	ret = sandybridge_pcode_read(dev_priv,
-				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
-				     &val, &val2);
+	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+			     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+			     &val, &val2);
 	if (ret)
 		return ret;
 
@@ -102,10 +101,8 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
 	int ret;
 	int i;
 
-	ret = sandybridge_pcode_read(dev_priv,
-				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-				     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO,
-				     &val, NULL);
+	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+			     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
 	if (ret)
 		return ret;
 
@@ -805,8 +805,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
 			 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
-	ret = sandybridge_pcode_write(dev_priv,
-				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+	ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"failed to inform pcode about cdclk change\n");
@@ -834,8 +833,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
 			    LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
 
-	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				cdclk_config->voltage_level);
+	snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+			cdclk_config->voltage_level);
 
 	intel_de_write(dev_priv, CDCLK_FREQ,
 		       DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1138,8 +1137,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	intel_de_posting_read(dev_priv, CDCLK_CTL);
 
 	/* inform PCU of the change */
-	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-				cdclk_config->voltage_level);
+	snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+			cdclk_config->voltage_level);
 
 	intel_update_cdclk(dev_priv);
 }
@@ -1717,10 +1716,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		 * BSpec requires us to wait up to 150usec, but that leads to
 		 * timeouts; the 2ms used here is based on experiment.
 		 */
-		ret = sandybridge_pcode_write_timeout(dev_priv,
-						      HSW_PCODE_DE_WRITE_FREQ_REQ,
-						      0x80000000, 150, 2);
-
+		ret = snb_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 150, 2);
 	if (ret) {
 		drm_err(&dev_priv->drm,
 			"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
@@ -1781,8 +1779,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
 
 	if (DISPLAY_VER(dev_priv) >= 11) {
-		ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-					      cdclk_config->voltage_level);
+		ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+				      cdclk_config->voltage_level);
 	} else {
 		/*
 		 * The timeout isn't specified, the 2ms used here is based on
@@ -1790,10 +1788,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		 * FIXME: Waiting for the request completion could be delayed
 		 * until the next PCODE request based on BSpec.
 		 */
-		ret = sandybridge_pcode_write_timeout(dev_priv,
-						      HSW_PCODE_DE_WRITE_FREQ_REQ,
-						      cdclk_config->voltage_level,
-						      150, 2);
+		ret = snb_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      cdclk_config->voltage_level,
+					      150, 2);
 	}
 
 	if (ret) {
@@ -1118,8 +1118,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
 
 	if (IS_BROADWELL(dev_priv)) {
-		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
-							 IPS_ENABLE | IPS_PCODE_CONTROL));
+		drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+						 IPS_ENABLE | IPS_PCODE_CONTROL));
 		/* Quoting Art Runyan: "its not safe to expect any particular
 		 * value in IPS_CTL bit 31 after enabling IPS through the
 		 * mailbox." Moreover, the mailbox may return a bogus state,
@@ -1149,7 +1149,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 
 	if (IS_BROADWELL(dev_priv)) {
 		drm_WARN_ON(dev,
-			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+			    snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		/*
 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
 		 * 42ms timeout value leads to occasional timeouts so use 100ms
@@ -683,9 +683,8 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
 	int ret, tries = 0;
 
 	while (1) {
-		ret = sandybridge_pcode_write_timeout(i915,
-						      ICL_PCODE_EXIT_TCCOLD,
-						      0, 250, 1);
+		ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
+					      250, 1);
 		if (ret != -EAGAIN || ++tries == 3)
 			break;
 		msleep(1);
@@ -4053,8 +4052,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
 		 * Spec states that we should timeout the request after 200us
 		 * but the function below will timeout after 500us
 		 */
-		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
-					     &high_val);
+		ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
 		if (ret == 0) {
 			if (block &&
 			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
@@ -5469,8 +5467,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
 {
 	if (IS_HASWELL(dev_priv)) {
-		if (sandybridge_pcode_write(dev_priv,
-					    GEN6_PCODE_WRITE_D_COMP, val))
+		if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
 			drm_dbg_kms(&dev_priv->drm,
 				    "Failed to write to D_COMP\n");
 	} else {
@@ -297,8 +297,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	 * Mailbox interface.
 	 */
 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
-		ret = sandybridge_pcode_write(dev_priv,
-					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
+		ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
 		if (ret) {
 			drm_err(&dev_priv->drm,
 				"Failed to initiate HDCP key load (%d)\n",
@@ -134,8 +134,7 @@ static int gen6_drpc(struct seq_file *m)
 	}
 
 	if (GRAPHICS_VER(i915) <= 7)
-		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
-				       &rc6vids, NULL);
+		snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
 
 	seq_printf(m, "RC1e Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -557,9 +556,8 @@ static int llc_show(struct seq_file *m, void *data)
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		ia_freq = gpu_freq;
-		sandybridge_pcode_read(i915,
-				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
-				       &ia_freq, NULL);
+		snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+			       &ia_freq, NULL);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(rps,
 					  (gpu_freq *
@@ -140,11 +140,10 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
 		unsigned int ia_freq, ring_freq;
 
 		calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
-		sandybridge_pcode_write(i915,
-					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
-					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
-					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
-					gpu_freq);
+		snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+				ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+				ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+				gpu_freq);
 	}
 }
 
@@ -261,8 +261,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 		GEN6_RC_CTL_HW_ENABLE;
 
 	rc6vids = 0;
-	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
-				     &rc6vids, NULL);
+	ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
 	if (GRAPHICS_VER(i915) == 6 && ret) {
 		drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
 	} else if (GRAPHICS_VER(i915) == 6 &&
@@ -272,7 +271,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 			GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
 		rc6vids &= 0xffff00;
 		rc6vids |= GEN6_ENCODE_RC6_VID(450);
-		ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
 		if (ret)
 			drm_err(&i915->drm,
 				"Couldn't fix incorrect rc6 voltage\n");
@@ -1019,9 +1019,8 @@ static void gen6_rps_init(struct intel_rps *rps)
 	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
 		u32 ddcc_status = 0;
 
-		if (sandybridge_pcode_read(i915,
-					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-					   &ddcc_status, NULL) == 0)
+		if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+				   &ddcc_status, NULL) == 0)
 			rps->efficient_freq =
 				clamp_t(u8,
 					(ddcc_status >> 8) & 0xff,
@@ -1869,8 +1868,7 @@ void intel_rps_init(struct intel_rps *rps)
 	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
 		u32 params = 0;
 
-		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
-				       &params, NULL);
+		snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL);
 		if (params & BIT(31)) { /* OC supported */
 			drm_dbg(&i915->drm,
 				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
@@ -31,9 +31,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 		calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
 
 		val = gpu_freq;
-		if (sandybridge_pcode_read(i915,
-					   GEN6_PCODE_READ_MIN_FREQ_TABLE,
-					   &val, NULL)) {
+		if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+				   &val, NULL)) {
 			pr_err("Failed to read freq table[%d], range [%d, %d]\n",
 			       gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
 			err = -ENXIO;
@@ -519,9 +519,8 @@ static void show_pcu_config(struct intel_rps *rps)
 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		int ia_freq = gpu_freq;
 
-		sandybridge_pcode_read(i915,
-				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
-				       &ia_freq, NULL);
+		snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+			       &ia_freq, NULL);
 
 		pr_info("%5d %5d %5d\n",
 			gpu_freq * 50,
@@ -389,10 +389,8 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
 	u32 val = 0;
 	int ret;
 
-	ret = sandybridge_pcode_read(dev_priv,
-				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
-				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
-				     &val, NULL);
+	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+			     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
 	if (ret)
 		return ret;
 
@@ -51,11 +51,10 @@ static int gen7_check_mailbox_status(u32 mbox)
 	}
 }
 
-static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
-				  u32 mbox, u32 *val, u32 *val1,
-				  int fast_timeout_us,
-				  int slow_timeout_ms,
-				  bool is_read)
+static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
+			  u32 *val, u32 *val1,
+			  int fast_timeout_us, int slow_timeout_ms,
+			  bool is_read)
 {
 	struct intel_uncore *uncore = &i915->uncore;
 
@@ -94,15 +93,12 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
 	return gen6_check_mailbox_status(mbox);
 }
 
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
-			   u32 *val, u32 *val1)
+int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
 {
 	int err;
 
 	mutex_lock(&i915->sb_lock);
-	err = __sandybridge_pcode_rw(i915, mbox, val, val1,
-				     500, 20,
-				     true);
+	err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true);
 	mutex_unlock(&i915->sb_lock);
 
 	if (err) {
@@ -114,17 +110,14 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
 	return err;
 }
 
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
-				    u32 mbox, u32 val,
-				    int fast_timeout_us,
-				    int slow_timeout_ms)
+int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+			    int fast_timeout_us, int slow_timeout_ms)
 {
 	int err;
 
 	mutex_lock(&i915->sb_lock);
-	err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
-				     fast_timeout_us, slow_timeout_ms,
-				     false);
+	err = __snb_pcode_rw(i915, mbox, &val, NULL,
+			     fast_timeout_us, slow_timeout_ms, false);
 	mutex_unlock(&i915->sb_lock);
 
 	if (err) {
@@ -140,9 +133,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
 				  u32 request, u32 reply_mask, u32 reply,
 				  u32 *status)
 {
-	*status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
-					 500, 0,
-					 true);
+	*status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);
 
 	return *status || ((request & reply_mask) == reply);
 }
@@ -10,13 +10,11 @@
 
 struct drm_i915_private;
 
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
-			   u32 *val, u32 *val1);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
-				    u32 val, int fast_timeout_us,
-				    int slow_timeout_ms);
-#define sandybridge_pcode_write(i915, mbox, val) \
-	sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1);
+int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+			    int fast_timeout_us, int slow_timeout_ms);
+#define snb_pcode_write(i915, mbox, val) \
+	snb_pcode_write_timeout(i915, mbox, val, 500, 0)
 
 int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
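After the rename, the header keeps snb_pcode_write() as a macro around snb_pcode_write_timeout() with the default 500 us fast timeout and no slow timeout, mirroring the old sandybridge_pcode_write(). Illustrative call sites only, abbreviated from the hunks above rather than new code in this patch:

	/* write with the default timeouts via the macro */
	ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
			      cdclk_config->voltage_level);

	/* write with explicit timeouts (150 us fast, 2 ms slow) */
	ret = snb_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000, 150, 2);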
@@ -2890,9 +2890,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 
 		/* read the first set of memory latencies[0:3] */
 		val = 0; /* data0 to be programmed to 0 for first set */
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val, NULL);
+		ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+				     &val, NULL);
 
 		if (ret) {
 			drm_err(&dev_priv->drm,
@@ -2910,9 +2909,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 
 		/* read the second set of memory latencies[4:7] */
 		val = 1; /* data0 to be programmed to 1 for second set */
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val, NULL);
+		ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+				     &val, NULL);
 		if (ret) {
 			drm_err(&dev_priv->drm,
 				"SKL Mailbox read error = %d\n", ret);
@@ -3702,9 +3700,9 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
 		u32 val = 0;
 		int ret;
 
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
-					     &val, NULL);
+		ret = snb_pcode_read(dev_priv,
+				     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+				     &val, NULL);
 		if (!ret) {
 			dev_priv->sagv_block_time_us = val;
 			return;
@@ -3751,8 +3749,8 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
 		return 0;
 
 	drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
-	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-				      GEN9_SAGV_ENABLE);
+	ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+			      GEN9_SAGV_ENABLE);
 
 	/* We don't need to wait for SAGV when enabling */
 