Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm radeon/nouveau/core fixes from Dave Airlie:
 "Mostly radeon fixes, with some nouveau bios parser, ttm fix and a fix
  for AST driver"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (42 commits)
  drm/fb-helper: don't sleep for screen unblank when an oops is in progress
  drm, ttm Fix uninitialized warning
  drm/ttm: fix the tt_populated check in ttm_tt_destroy()
  drm/nouveau/ttm: prevent double-free in nouveau_sgdma_create_ttm() failure path
  drm/nouveau/bios/init: fix thinko in INIT_CONFIGURE_MEM
  drm/nouveau/kms: enable for non-vga pci classes
  drm/nouveau/bios/init: stub opcode 0xaa
  drm/radeon: avoid UVD corruptions on AGP cards
  drm/radeon: fix panel scaling with eDP and LVDS bridges
  drm/radeon/dpm: rework auto performance level enable
  drm/radeon: Fix hmdi typo
  drm/radeon/dpm/rs780: fix force_performance state for same sclks
  drm/radeon/dpm/rs780: don't enable sclk scaling if not required
  drm/radeon/dpm/rs780: add some sanity checking to sclk scaling
  drm/radeon/dpm/rs780: use drm_mode_vrefresh()
  drm/udl: rip out set_need_resched
  drm/ast: fix the ast open key function
  drm/radeon/dpm: add bapm callback for kb/kv
  drm/radeon/dpm: add bapm callback for trinity
  drm/radeon/dpm: add infrastructure to properly handle bapm
  ...
commit ed24fee24a
Linus Torvalds committed 2013-09-18 21:17:44 -05:00

55 changed files with 837 additions and 261 deletions


@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
 static inline void ast_open_key(struct ast_private *ast)
 {
-    ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+    ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
 }
 #define AST_VIDMEM_SIZE_8M    0x00800000


@@ -407,6 +407,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
     struct drm_connector *connector;
     int i, j;
+    /*
+     * fbdev->blank can be called from irq context in case of a panic.
+     * Since we already have our own special panic handler which will
+     * restore the fbdev console mode completely, just bail out early.
+     */
+    if (oops_in_progress)
+        return;
+
     /*


@@ -579,8 +579,22 @@ static void
 init_reserved(struct nvbios_init *init)
 {
     u8 opcode = nv_ro08(init->bios, init->offset);
-    trace("RESERVED\t0x%02x\n", opcode);
-    init->offset += 1;
+    u8 length, i;
+
+    switch (opcode) {
+    case 0xaa:
+        length = 4;
+        break;
+    default:
+        length = 1;
+        break;
+    }
+
+    trace("RESERVED 0x%02x\t", opcode);
+    for (i = 1; i < length; i++)
+        cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+    cont("\n");
+
+    init->offset += length;
 }

 /**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
     data = init_rdvgai(init, 0x03c4, 0x01);
     init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
-    while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+    for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
         switch (addr) {
         case 0x10021c: /* CKE_NORMAL */
         case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
     [0x99] = { init_zm_auxch },
     [0x9a] = { init_i2c_long_if },
     [0xa9] = { init_gpio_ne },
+    [0xaa] = { init_reserved },
 };

 #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))


@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
     struct nouveau_display *disp;
-    u32 pclass = dev->pdev->class >> 8;
     int ret, gen;

     disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
     drm_kms_helper_poll_init(dev);
     drm_kms_helper_poll_disable(dev);
-    if (nouveau_modeset == 1 ||
-        (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
-        if (drm->vbios.dcb.entries) {
-            if (nv_device(drm->device)->card_type < NV_50)
-                ret = nv04_display_create(dev);
-            else
-                ret = nv50_display_create(dev);
-        } else {
-            ret = 0;
-        }
-        if (ret)
-            goto disp_create_err;
-        if (dev->mode_config.num_crtc) {
-            ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
-            if (ret)
-                goto vblank_err;
-        }
-        nouveau_backlight_init(dev);
+    if (drm->vbios.dcb.entries) {
+        if (nv_device(drm->device)->card_type < NV_50)
+            ret = nv04_display_create(dev);
+        else
+            ret = nv50_display_create(dev);
+    } else {
+        ret = 0;
     }
+    if (ret)
+        goto disp_create_err;
+    if (dev->mode_config.num_crtc) {
+        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+        if (ret)
+            goto vblank_err;
+    }
+    nouveau_backlight_init(dev);
     return 0;

 vblank_err:


@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
     int preferred_bpp;
     int ret;
-    if (!dev->mode_config.num_crtc)
+    if (!dev->mode_config.num_crtc ||
+        (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
         return 0;

     fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);


@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
     else
         nvbe->ttm.ttm.func = &nv50_sgdma_backend;
-    if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
-        kfree(nvbe);
+    if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
         return NULL;
-    }
     return &nvbe->ttm.ttm;
 }


@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
     switch (connector->connector_type) {
     case DRM_MODE_CONNECTOR_DVII:
     case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-        if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-            radeon_audio)
+        if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+            (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+             (radeon_connector->audio == RADEON_AUDIO_AUTO)))
             return ATOM_ENCODER_MODE_HDMI;
         else if (radeon_connector->use_digital)
             return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
     case DRM_MODE_CONNECTOR_DVID:
     case DRM_MODE_CONNECTOR_HDMIA:
     default:
-        if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-            radeon_audio)
+        if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+            (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+             (radeon_connector->audio == RADEON_AUDIO_AUTO)))
             return ATOM_ENCODER_MODE_HDMI;
         else
             return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
         if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
             (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
             return ATOM_ENCODER_MODE_DP;
-        else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                 radeon_audio)
+        else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+                 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                  (radeon_connector->audio == RADEON_AUDIO_AUTO)))
             return ATOM_ENCODER_MODE_HDMI;
         else
             return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
             atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
             atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-            /* some early dce3.2 boards have a bug in their transmitter control table */
-            if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+            /* some dce3.x boards have a bug in their transmitter control table.
+             * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+             * does the same thing and more.
+             */
+            if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+                (rdev->family != CHIP_RS880))
                 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
         }
         if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {


@@ -2340,12 +2340,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
         return ret;
     }
-    ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-        return ret;
-    }
     return 0;
 }


@@ -4748,12 +4748,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
     if (pi->pcie_performance_request)
         ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
-    ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("ci_dpm_force_performance_level failed\n");
-        return ret;
-    }
     cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
                          RADEON_CG_BLOCK_MC |
                          RADEON_CG_BLOCK_SDMA |


@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
                          u32 smc_start_address,
                          const u8 *src, u32 byte_count, u32 limit)
 {
+    unsigned long flags;
     u32 data, original_data;
     u32 addr;
     u32 extra_shift;
-    int ret;
+    int ret = 0;

     if (smc_start_address & 3)
         return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
     addr = smc_start_address;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     while (byte_count >= 4) {
         /* SMC address space is BE */
         data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
         WREG32(SMC_IND_DATA_0, data);
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
         original_data = RREG32(SMC_IND_DATA_0);
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
         WREG32(SMC_IND_DATA_0, data);
     }
-    return 0;
+
+done:
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+    return ret;
 }

 void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+    unsigned long flags;
     u32 ucode_start_address;
     u32 ucode_size;
     const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
         return -EINVAL;
     src = (const u8 *)rdev->smc_fw->data;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(SMC_IND_INDEX_0, ucode_start_address);
     WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
     while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
         ucode_size -= 4;
     }
     WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
     return 0;
 }
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
                            u32 smc_address, u32 *value, u32 limit)
 {
+    unsigned long flags;
     int ret;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        *value = RREG32(SMC_IND_DATA_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
-    *value = RREG32(SMC_IND_DATA_0);
-    return 0;
+    return ret;
 }

 int ci_write_smc_sram_dword(struct radeon_device *rdev,
                             u32 smc_address, u32 value, u32 limit)
 {
+    unsigned long flags;
     int ret;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        WREG32(SMC_IND_DATA_0, value);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
-    WREG32(SMC_IND_DATA_0, value);
-    return 0;
+    return ret;
 }


@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+                                          bool enable);

 /* get temperature in millidegrees */
 int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
 */
 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     r = RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }

 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     WREG32(PCIE_DATA, v);
     (void)RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }

 static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
     } else if ((rdev->pdev->device == 0x1309) ||
                (rdev->pdev->device == 0x130A) ||
                (rdev->pdev->device == 0x130D) ||
-               (rdev->pdev->device == 0x1313)) {
+               (rdev->pdev->device == 0x1313) ||
+               (rdev->pdev->device == 0x131D)) {
         rdev->config.cik.max_cu_per_sh = 6;
         rdev->config.cik.max_backends_per_se = 2;
     } else if ((rdev->pdev->device == 0x1306) ||
@@ -4013,6 +4023,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
 {
     int r;
+    cik_enable_gui_idle_interrupt(rdev, false);
     r = cik_cp_load_microcode(rdev);
     if (r)
         return r;
@@ -4024,6 +4036,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
     if (r)
         return r;
+    cik_enable_gui_idle_interrupt(rdev, true);
     return 0;
 }
@@ -5376,7 +5390,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
 void cik_update_cg(struct radeon_device *rdev,
                    u32 block, bool enable)
 {
     if (block & RADEON_CG_BLOCK_GFX) {
+        cik_enable_gui_idle_interrupt(rdev, false);
         /* order matters! */
         if (enable) {
             cik_enable_mgcg(rdev, true);
@@ -5385,6 +5401,7 @@ void cik_update_cg(struct radeon_device *rdev,
             cik_enable_cgcg(rdev, false);
             cik_enable_mgcg(rdev, false);
         }
+        cik_enable_gui_idle_interrupt(rdev, true);
     }

     if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5558,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
 {
     u32 data, orig;
-    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
         orig = data = RREG32(RLC_PG_CNTL);
         data |= GFX_PG_ENABLE;
         if (orig != data)
@@ -5805,7 +5822,7 @@ static void cik_init_pg(struct radeon_device *rdev)
     if (rdev->pg_flags) {
         cik_enable_sck_slowdown_on_pu(rdev, true);
         cik_enable_sck_slowdown_on_pd(rdev, true);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_init_gfx_cgpg(rdev);
             cik_enable_cp_pg(rdev, true);
             cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5836,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
 {
     if (rdev->pg_flags) {
         cik_update_gfx_pg(rdev, false);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_enable_cp_pg(rdev, false);
             cik_enable_gds_pg(rdev, false);
         }
@@ -5895,7 +5912,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
     u32 tmp;
     /* gfx ring */
-    WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    tmp = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    WREG32(CP_INT_CNTL_RING0, tmp);
     /* sdma */
     tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
     WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6055,7 @@ static int cik_irq_init(struct radeon_device *rdev)
 */
 int cik_irq_set(struct radeon_device *rdev)
 {
-    u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
-        PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+    u32 cp_int_cntl;
     u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
     u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
     u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6076,10 @@ int cik_irq_set(struct radeon_device *rdev)
         return 0;
     }
+    cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
     hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;


@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
     if (eg_pi->pcie_performance_request)
         cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
-    ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-        return ret;
-    }
     return 0;
 }


@@ -28,22 +28,30 @@
 static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
                               u32 block_offset, u32 reg)
 {
+    unsigned long flags;
     u32 r;
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
     return r;
 }

 static void dce6_endpoint_wreg(struct radeon_device *rdev,
                                u32 block_offset, u32 reg, u32 v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     if (ASIC_IS_DCE8(rdev))
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     else
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
                AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
     WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
 }

 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
     u32 offset = dig->afmt->offset;
-    u32 id = dig->afmt->pin->id;
     if (!dig->afmt->pin)
         return;
-    WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+    WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+           AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }

 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)


@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
 static void kv_enable_new_levels(struct radeon_device *rdev);
 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
                                            struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
 static int kv_set_enabled_levels(struct radeon_device *rdev);
 static int kv_force_dpm_highest(struct radeon_device *rdev);
 static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
 static void kv_program_vc(struct radeon_device *rdev)
 {
-    WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+    WREG32_SMC(CG_FTV_0, 0x3FFFC100);
 }

 static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-    return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    if (rdev->family == CHIP_KABINI)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    else
+        return kv_set_enabled_levels(rdev);
 }

 static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
     if (table && table->count) {
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].clk == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].clk == pi->boot_pl.sclk)
                 break;
         }
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
         if (table->num_max_dpm_entries == 0)
             return -EINVAL;
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
                 break;
         }
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
         PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
 }

+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    pi->acp_boot_level = 0xff;
+}
+
 static void kv_update_current_ps(struct radeon_device *rdev,
                                  struct radeon_ps *rps)
 {
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
     pi->requested_rps.ps_priv = &pi->requested_ps;
 }

+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    int ret;
+
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, enable);
+        if (ret)
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+    }
+}
+
 int kv_dpm_enable(struct radeon_device *rdev)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
         return ret;
     }
+    kv_reset_acp_boot_level(rdev);
     if (rdev->irq.installed &&
         r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
         ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
         radeon_irq_set(rdev);
     }
+    ret = kv_smc_bapm_enable(rdev, false);
+    if (ret) {
+        DRM_ERROR("kv_smc_bapm_enable failed\n");
+        return ret;
+    }
     /* powerdown unused blocks for now */
     kv_dpm_powergate_acp(rdev, true);
     kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
                  RADEON_CG_BLOCK_BIF |
                  RADEON_CG_BLOCK_HDP), false);
+    kv_smc_bapm_enable(rdev, false);
     /* powerup blocks */
     kv_dpm_powergate_acp(rdev, false);
     kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
     return kv_enable_samu_dpm(rdev, !gate);
 }

+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+    u8 i;
+    struct radeon_clock_voltage_dependency_table *table =
+        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+    for (i = 0; i < table->count; i++) {
+        if (table->entries[i].clk >= 0) /* XXX */
+            break;
+    }
+
+    if (i >= table->count)
+        i = table->count - 1;
+
+    return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    u8 acp_boot_level;
+
+    if (!pi->caps_stable_p_state) {
+        acp_boot_level = kv_get_acp_boot_level(rdev);
+        if (acp_boot_level != pi->acp_boot_level) {
+            pi->acp_boot_level = acp_boot_level;
+            kv_send_msg_to_smc_with_parameter(rdev,
+                                              PPSMC_MSG_ACPDPM_SetEnabledMask,
+                                              (1 << pi->acp_boot_level));
+        }
+    }
+}
+
 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
         if (pi->caps_stable_p_state)
             pi->acp_boot_level = table->count - 1;
         else
-            pi->acp_boot_level = 0;
+            pi->acp_boot_level = kv_get_acp_boot_level(rdev);
         ret = kv_copy_bytes_to_smc(rdev,
                                    pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
             }
         }
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
-                (i == 0)) {
-                pi->highest_valid = i;
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
                 break;
-            }
         }
+        pi->highest_valid = i;
         if (pi->lowest_valid > pi->highest_valid) {
             if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
             }
         }
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
             if (table->entries[i].sclk_frequency <=
-                new_ps->levels[new_ps->num_levels - 1].sclk ||
-                i == 0) {
-                pi->highest_valid = i;
+                new_ps->levels[new_ps->num_levels - 1].sclk)
                 break;
-            }
         }
+        pi->highest_valid = i;
         if (pi->lowest_valid > pi->highest_valid) {
             if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                  RADEON_CG_BLOCK_BIF |
                  RADEON_CG_BLOCK_HDP), false);
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+        if (ret) {
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+            return ret;
+        }
+    }
     if (rdev->family == CHIP_KABINI) {
         if (pi->enable_dpm) {
             kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
             return ret;
         }
 #endif
+        kv_update_acp_boot_level(rdev);
         kv_update_sclk_t(rdev);
         kv_enable_nb_dpm(rdev);
     }
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                  RADEON_CG_BLOCK_BIF |
                  RADEON_CG_BLOCK_HDP), true);
-    rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
     return 0;
 }
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
 void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
-    kv_force_lowest_valid(rdev);
-    kv_init_graphics_levels(rdev);
-    kv_program_bootup_state(rdev);
-    kv_upload_dpm_settings(rdev);
-    kv_force_lowest_valid(rdev);
-    kv_unforce_levels(rdev);
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    if (rdev->family == CHIP_KABINI) {
+        kv_force_lowest_valid(rdev);
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_upload_dpm_settings(rdev);
+        kv_force_lowest_valid(rdev);
+        kv_unforce_levels(rdev);
+    } else {
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_freeze_sclk_dpm(rdev, true);
+        kv_upload_dpm_settings(rdev);
+        kv_freeze_sclk_dpm(rdev, false);
+        kv_set_enabled_level(rdev, pi->graphics_boot_level);
+    }
 }

 //XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
     if (ret)
         return ret;
-    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
         if (enable_mask & (1 << i))
             break;
     }
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }

 static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
             break;
     }
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }

 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
     if (!pi->caps_sclk_ds)
         return 0;
-    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
         temp = sclk / sumo_get_sleep_divider_from_id(i);
-        if ((temp >= min) || (i == 0))
+        if (temp >= min)
             break;
     }
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
         ps->dpmx_nb_ps_lo = 0x1;
         ps->dpmx_nb_ps_hi = 0x0;
     } else {
-        ps->dpm0_pg_nb_ps_lo = 0x1;
+        ps->dpm0_pg_nb_ps_lo = 0x3;
         ps->dpm0_pg_nb_ps_hi = 0x0;
-        ps->dpmx_nb_ps_lo = 0x2;
-        ps->dpmx_nb_ps_hi = 0x1;
-        if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+        ps->dpmx_nb_ps_lo = 0x3;
+        ps->dpmx_nb_ps_hi = 0x0;
+        if (pi->sys_info.nb_dpm_enable) {
             force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
                 pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
     }
 }

+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+    u32 new_mask = (1 << level);
+
+    return kv_send_msg_to_smc_with_parameter(rdev,
+                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                             new_mask);
+}
+
 static int kv_set_enabled_levels(struct radeon_device *rdev)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);


@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
 int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
                            u32 *value, u32 limit);
 int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
                          u32 smc_start_address,
                          const u8 *src, u32 byte_count, u32 limit);


@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
         return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
 }

+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+    if (enable)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+    else
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
                          u32 smc_start_address,
                          const u8 *src, u32 byte_count, u32 limit)


@@ -3865,12 +3865,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
         return ret;
     }
-    ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("ni_dpm_force_performance_level failed\n");
-        return ret;
-    }
     return 0;
 }


@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_VCEPowerON                    ((uint32_t) 0x10f)
 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment   ((uint32_t) 0x11d)
 #define PPSMC_MSG_DCE_AllowVoltageAdjustment    ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM                    ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                   ((uint32_t) 0x121)
 #define PPSMC_MSG_UVD_DPM_Config                ((uint32_t) 0x124)


@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t data;
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
     r100_pll_errata_after_index(rdev);
     data = RREG32(RADEON_CLOCK_CNTL_DATA);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
     return data;
 }

 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
     r100_pll_errata_after_index(rdev);
     WREG32(RADEON_CLOCK_CNTL_DATA, v);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }

 static void r100_set_safe_registers(struct radeon_device *rdev)


@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
     r = RREG32(R_0001FC_MC_IND_DATA);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }

 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
            S_0001F8_MC_IND_WR_EN(1));
     WREG32(R_0001FC_MC_IND_DATA, v);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void r420_debugfs(struct radeon_device *rdev)


@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
     return rdev->clock.spll.reference_freq;
 }

+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+    return 0;
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t r;
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
     r = RREG32(R_0028FC_MC_DATA);
     WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }

 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
            S_0028F8_MC_IND_WR_EN(1));
     WREG32(R_0028FC_MC_DATA, v);
     WREG32(R_0028F8_MC_INDEX, 0x7F);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }

 static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
 */
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     r = RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }

 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     WREG32(PCIE_PORT_DATA, (v));
     (void)RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }

 /*


@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 void r600_free_extended_power_table(struct radeon_device *rdev)
 {
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
-    if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
-    if (rdev->pm.dpm.dyn_state.ppm_table)
-        kfree(rdev->pm.dpm.dyn_state.ppm_table);
-    if (rdev->pm.dpm.dyn_state.cac_tdp_table)
-        kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
-    if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+    struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+    kfree(dyn_state->vddc_dependency_on_sclk.entries);
+    kfree(dyn_state->vddci_dependency_on_mclk.entries);
+    kfree(dyn_state->vddc_dependency_on_mclk.entries);
+    kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+    kfree(dyn_state->cac_leakage_table.entries);
+    kfree(dyn_state->phase_shedding_limits_table.entries);
+    kfree(dyn_state->ppm_table);
+    kfree(dyn_state->cac_tdp_table);
+    kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 }

 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,


@@ -1040,7 +1040,7 @@
 #       define HDMI0_AVI_INFO_CONT   (1 << 1)
 #       define HDMI0_AUDIO_INFO_SEND (1 << 4)
 #       define HDMI0_AUDIO_INFO_CONT (1 << 5)
-#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
 #       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
 #       define HDMI0_MPEG_INFO_SEND  (1 << 8)
 #       define HDMI0_MPEG_INFO_CONT  (1 << 9)


@ -181,7 +181,7 @@ extern int radeon_aspm;
#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) #define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */ /* PG flags */
#define RADEON_PG_SUPPORT_GFX_CG (1 << 0) #define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) #define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) #define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
#define RADEON_PG_SUPPORT_UVD (1 << 3) #define RADEON_PG_SUPPORT_UVD (1 << 3)
@ -1778,6 +1778,7 @@ struct radeon_asic {
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev); bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate); void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
void (*enable_bapm)(struct radeon_device *rdev, bool enable);
} dpm; } dpm;
/* pageflipping */ /* pageflipping */
struct { struct {
@ -2110,6 +2111,28 @@ struct radeon_device {
resource_size_t rmmio_size; resource_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */ /* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock; spinlock_t mmio_idx_lock;
/* protects concurrent SMC based register access */
spinlock_t smc_idx_lock;
/* protects concurrent PLL register access */
spinlock_t pll_idx_lock;
/* protects concurrent MC register access */
spinlock_t mc_idx_lock;
/* protects concurrent PCIE register access */
spinlock_t pcie_idx_lock;
/* protects concurrent PCIE_PORT register access */
spinlock_t pciep_idx_lock;
/* protects concurrent PIF register access */
spinlock_t pif_idx_lock;
/* protects concurrent CG register access */
spinlock_t cg_idx_lock;
/* protects concurrent UVD register access */
spinlock_t uvd_idx_lock;
/* protects concurrent RCU register access */
spinlock_t rcu_idx_lock;
/* protects concurrent DIDT register access */
spinlock_t didt_idx_lock;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t end_idx_lock;
void __iomem *rmmio; void __iomem *rmmio;
radeon_rreg_t mc_rreg; radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg; radeon_wreg_t mc_wreg;
@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
*/ */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
unsigned long flags;
uint32_t r; uint32_t r;
spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
r = RREG32(RADEON_PCIE_DATA); r = RREG32(RADEON_PCIE_DATA);
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
return r; return r;
} }
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
WREG32(RADEON_PCIE_DATA, (v)); WREG32(RADEON_PCIE_DATA, (v));
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
} }
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg)); WREG32(TN_SMC_IND_INDEX_0, (reg));
r = RREG32(TN_SMC_IND_DATA_0); r = RREG32(TN_SMC_IND_DATA_0);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return r; return r;
} }
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg)); WREG32(TN_SMC_IND_INDEX_0, (reg));
WREG32(TN_SMC_IND_DATA_0, (v)); WREG32(TN_SMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
} }
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
r = RREG32(R600_RCU_DATA); r = RREG32(R600_RCU_DATA);
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
return r; return r;
} }
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
WREG32(R600_RCU_DATA, (v)); WREG32(R600_RCU_DATA, (v));
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
} }
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
r = RREG32(EVERGREEN_CG_IND_DATA); r = RREG32(EVERGREEN_CG_IND_DATA);
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
return r; return r;
} }
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
WREG32(EVERGREEN_CG_IND_DATA, (v)); WREG32(EVERGREEN_CG_IND_DATA, (v));
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
} }
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY0_DATA); r = RREG32(EVERGREEN_PIF_PHY0_DATA);
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r; return r;
} }
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
} }
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY1_DATA); r = RREG32(EVERGREEN_PIF_PHY1_DATA);
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r; return r;
} }
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
} }
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(R600_UVD_CTX_DATA); r = RREG32(R600_UVD_CTX_DATA);
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
return r; return r;
} }
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(R600_UVD_CTX_DATA, (v)); WREG32(R600_UVD_CTX_DATA, (v));
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
} }
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{ {
unsigned long flags;
u32 r; u32 r;
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg)); WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA); r = RREG32(CIK_DIDT_IND_DATA);
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r; return r;
} }
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg)); WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v)); WREG32(CIK_DIDT_IND_DATA, (v));
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
} }
void r100_pll_errata_after_index(struct radeon_device *rdev); void r100_pll_errata_after_index(struct radeon_device *rdev);
@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
/* Common functions */ /* Common functions */
/* AGP */ /* AGP */
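The accessors above all share one pattern: take the bank's spinlock with spin_lock_irqsave(), program the INDEX register, access the DATA register, then unlock. A minimal stand-alone sketch of that pattern, assuming a hypothetical foo device and made-up register offsets (not the real radeon register map), looks like this:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define FOO_IND_INDEX 0x100	/* hypothetical index register offset */
#define FOO_IND_DATA  0x104	/* hypothetical data register offset */

struct foo_device {
	void __iomem *mmio;
	spinlock_t ind_lock;	/* one lock per INDEX/DATA pair */
};

static u32 foo_ind_rreg(struct foo_device *fdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&fdev->ind_lock, flags);
	writel(reg & 0xffff, fdev->mmio + FOO_IND_INDEX);	/* select register */
	r = readl(fdev->mmio + FOO_IND_DATA);			/* read it back */
	spin_unlock_irqrestore(&fdev->ind_lock, flags);
	return r;
}

static void foo_ind_wreg(struct foo_device *fdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&fdev->ind_lock, flags);
	writel(reg & 0xffff, fdev->mmio + FOO_IND_INDEX);	/* select register */
	writel(v, fdev->mmio + FOO_IND_DATA);			/* write the value */
	spin_unlock_irqrestore(&fdev->ind_lock, flags);
}

Without the lock, two contexts can interleave their INDEX writes and end up reading or writing the wrong register through DATA; that is the race the new per-bank rdev->*_idx_lock spinlocks close.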


@ -1037,6 +1037,7 @@ static struct radeon_asic rv6xx_asic = {
.set_pcie_lanes = &r600_set_pcie_lanes, .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL, .set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp, .get_temperature = &rv6xx_get_temp,
.set_uvd_clocks = &r600_set_uvd_clocks,
}, },
.dpm = { .dpm = {
.init = &rv6xx_dpm_init, .init = &rv6xx_dpm_init,
@ -1126,6 +1127,7 @@ static struct radeon_asic rs780_asic = {
.set_pcie_lanes = NULL, .set_pcie_lanes = NULL,
.set_clock_gating = NULL, .set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp, .get_temperature = &rv6xx_get_temp,
.set_uvd_clocks = &r600_set_uvd_clocks,
}, },
.dpm = { .dpm = {
.init = &rs780_dpm_init, .init = &rs780_dpm_init,
@ -1141,6 +1143,7 @@ static struct radeon_asic rs780_asic = {
.get_mclk = &rs780_dpm_get_mclk, .get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state, .print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rs780_dpm_force_performance_level,
}, },
.pflip = { .pflip = {
.pre_page_flip = &rs600_pre_page_flip, .pre_page_flip = &rs600_pre_page_flip,
@ -1791,6 +1794,7 @@ static struct radeon_asic trinity_asic = {
.print_power_state = &trinity_dpm_print_power_state, .print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level, .force_performance_level = &trinity_dpm_force_performance_level,
.enable_bapm = &trinity_dpm_enable_bapm,
}, },
.pflip = { .pflip = {
.pre_page_flip = &evergreen_pre_page_flip, .pre_page_flip = &evergreen_pre_page_flip,
@ -2166,6 +2170,7 @@ static struct radeon_asic kv_asic = {
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level, .force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd, .powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
}, },
.pflip = { .pflip = {
.pre_page_flip = &evergreen_pre_page_flip, .pre_page_flip = &evergreen_pre_page_flip,
@ -2390,7 +2395,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG; RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0 | rdev->pg_flags = 0 |
/*RADEON_PG_SUPPORT_GFX_PG | */
RADEON_PG_SUPPORT_SDMA; RADEON_PG_SUPPORT_SDMA;
break; break;
case CHIP_OLAND: case CHIP_OLAND:
@ -2479,7 +2484,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG; RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0; rdev->pg_flags = 0;
/*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG | RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_GFX_DMG | RADEON_PG_SUPPORT_GFX_DMG |
RADEON_PG_SUPPORT_UVD | RADEON_PG_SUPPORT_UVD |
@ -2507,7 +2512,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS | RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG; RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0; rdev->pg_flags = 0;
/*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG | RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_UVD | RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE | RADEON_PG_SUPPORT_VCE |


@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev); u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev); int rv6xx_get_temp(struct radeon_device *rdev);
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev); int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev); void r600_dpm_post_set_power_state(struct radeon_device *rdev);
/* r600 dma */ /* r600 dma */
@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps); struct radeon_ps *ps);
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m); struct seq_file *m);
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
/* /*
* rv770,rv730,rv710,rv740 * rv770,rv730,rv710,rv740
@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
struct seq_file *m); struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev, int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level); enum radeon_dpm_forced_level level);
void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* DCE6 - SI */ /* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev); void dce6_bandwidth_update(struct radeon_device *rdev);
@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int kv_dpm_force_performance_level(struct radeon_device *rdev, int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level); enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* uvd v1.0 */ /* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,


@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
} }
} }
if (property == rdev->mode_info.audio_property) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_connector->audio != val) {
radeon_connector->audio = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
if (property == rdev->mode_info.underscan_property) { if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */ /* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector)) if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected; ret = connector_status_connected;
} else { } else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false)) if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected; ret = connector_status_connected;
} }
@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.force = radeon_dvi_force, .force = radeon_dvi_force,
}; };
static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
.destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
.destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
void void
radeon_add_atom_connector(struct drm_device *dev, radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id, uint32_t connector_id,
@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed; goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector; radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) { if (i2c_bus->valid) {
/* add DP i2c bus */ /* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP) if (connector_type == DRM_MODE_CONNECTOR_eDP)
@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA: case DRM_MODE_CONNECTOR_DVIA:
default: default:
drm_connector_init(dev, &radeon_connector->base,
&radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true; connector->interlace_allowed = true;
connector->doublescan_allowed = true; connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true; radeon_connector->dac_load_detect = true;
@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_DisplayPort:
drm_connector_init(dev, &radeon_connector->base,
&radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base, drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property, rdev->mode_info.underscan_property,
UNDERSCAN_OFF); UNDERSCAN_OFF);
@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base, drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property, rdev->mode_info.underscan_vborder_property,
0); 0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_DISABLE);
subpixel_order = SubPixelHorizontalRGB; subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true; connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB) if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break; break;
case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP: case DRM_MODE_CONNECTOR_eDP:
drm_connector_init(dev, &radeon_connector->base,
&radeon_lvds_bridge_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base, drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property, dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN); DRM_MODE_SCALE_FULLSCREEN);
@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property, rdev->mode_info.underscan_vborder_property,
0); 0);
} }
if (ASIC_IS_DCE2(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_DISABLE);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) { if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true; radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base, drm_object_attach_property(&radeon_connector->base.base,
@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property, rdev->mode_info.underscan_vborder_property,
0); 0);
} }
if (ASIC_IS_DCE2(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_DISABLE);
}
subpixel_order = SubPixelHorizontalRGB; subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true; connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB) if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property, rdev->mode_info.underscan_vborder_property,
0); 0);
} }
if (ASIC_IS_DCE2(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_DISABLE);
}
connector->interlace_allowed = true; connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */ /* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false; connector->doublescan_allowed = false;
@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed; goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector; radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) { if (i2c_bus->valid) {
/* add DP i2c bus */ /* add DP i2c bus */


@ -28,6 +28,7 @@
#include <drm/radeon_drm.h> #include <drm/radeon_drm.h>
#include "radeon_reg.h" #include "radeon_reg.h"
#include "radeon.h" #include "radeon.h"
#include "radeon_trace.h"
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{ {
@ -80,9 +81,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].lobj.bo = p->relocs[i].robj; p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.written = !!r->write_domain; p->relocs[i].lobj.written = !!r->write_domain;
/* the first reloc of an UVD job is the msg and that must be in
VRAM, also put everything into VRAM on AGP cards to avoid
image corruptions */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
/* TODO: is this still needed for NI+ ? */ /* TODO: is this still needed for NI+ ? */
p->relocs[i].lobj.domain = p->relocs[i].lobj.domain =
RADEON_GEM_DOMAIN_VRAM; RADEON_GEM_DOMAIN_VRAM;
@ -559,6 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r; return r;
} }
trace_radeon_cs(&parser);
r = radeon_cs_ib_chunk(rdev, &parser); r = radeon_cs_ib_chunk(rdev, &parser);
if (r) { if (r) {
goto out; goto out;


@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */ /* Registers mapping */
/* TODO: block userspace mapping of io register */ /* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock); spin_lock_init(&rdev->mmio_idx_lock);
spin_lock_init(&rdev->smc_idx_lock);
spin_lock_init(&rdev->pll_idx_lock);
spin_lock_init(&rdev->mc_idx_lock);
spin_lock_init(&rdev->pcie_idx_lock);
spin_lock_init(&rdev->pciep_idx_lock);
spin_lock_init(&rdev->pif_idx_lock);
spin_lock_init(&rdev->cg_idx_lock);
spin_lock_init(&rdev->uvd_idx_lock);
spin_lock_init(&rdev->rcu_idx_lock);
spin_lock_init(&rdev->didt_idx_lock);
spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) { if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);


@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ UNDERSCAN_AUTO, "auto" }, { UNDERSCAN_AUTO, "auto" },
}; };
static struct drm_prop_enum_list radeon_audio_enum_list[] =
{ { RADEON_AUDIO_DISABLE, "off" },
{ RADEON_AUDIO_ENABLE, "on" },
{ RADEON_AUDIO_AUTO, "auto" },
};
static int radeon_modeset_create_props(struct radeon_device *rdev) static int radeon_modeset_create_props(struct radeon_device *rdev)
{ {
int sz; int sz;
@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.underscan_vborder_property) if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM; return -ENOMEM;
sz = ARRAY_SIZE(radeon_audio_enum_list);
rdev->mode_info.audio_property =
drm_property_create_enum(rdev->ddev, 0,
"audio",
radeon_audio_enum_list, sz);
return 0; return 0;
} }


@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0; int radeon_testing = 0;
int radeon_connector_table = 0; int radeon_connector_table = 0;
int radeon_tv = 1; int radeon_tv = 1;
int radeon_audio = 1;
int radeon_disp_priority = 0; int radeon_disp_priority = 0;
int radeon_hw_i2c = 0; int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1; int radeon_pcie_gen2 = -1;


@ -247,6 +247,8 @@ struct radeon_mode_info {
struct drm_property *underscan_property; struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property; struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property; struct drm_property *underscan_vborder_property;
/* audio */
struct drm_property *audio_property;
/* hardcoded DFP edid from BIOS */ /* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid; struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size; int bios_hardcoded_edid_size;
@ -471,6 +473,12 @@ struct radeon_router {
u8 cd_mux_state; u8 cd_mux_state;
}; };
enum radeon_connector_audio {
RADEON_AUDIO_DISABLE = 0,
RADEON_AUDIO_ENABLE = 1,
RADEON_AUDIO_AUTO = 2
};
struct radeon_connector { struct radeon_connector {
struct drm_connector base; struct drm_connector base;
uint32_t connector_id; uint32_t connector_id;
@ -489,6 +497,7 @@ struct radeon_connector {
struct radeon_hpd hpd; struct radeon_hpd hpd;
struct radeon_router router; struct radeon_router router;
struct radeon_i2c_chan *router_bus; struct radeon_i2c_chan *router_bus;
enum radeon_connector_audio audio;
}; };
struct radeon_framebuffer { struct radeon_framebuffer {


@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
void radeon_pm_acpi_event_handler(struct radeon_device *rdev) void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{ {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
if (rdev->asic->dpm.enable_bapm)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
mutex_unlock(&rdev->pm.mutex);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) { if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex); mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev); radeon_pm_update_profile(rdev);
@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile; int cp = rdev->pm.profile;
@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
const char *buf, const char *buf,
size_t count) size_t count)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex); mutex_lock(&rdev->pm.mutex);
@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method; int pm = rdev->pm.pm_method;
@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
const char *buf, const char *buf,
size_t count) size_t count)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
/* we don't support the legacy modes with dpm */ /* we don't support the legacy modes with dpm */
@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
const char *buf, const char *buf,
size_t count) size_t count)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex); mutex_lock(&rdev->pm.mutex);
@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
const char *buf, const char *buf,
size_t count) size_t count)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level; enum radeon_dpm_forced_level level;
int ret = 0; int ret = 0;
@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
int temp; int temp;
@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp); return snprintf(buf, PAGE_SIZE, "%d\n", temp);
} }
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
if (hyst)
temp = rdev->pm.dpm.thermal.min_temp;
else
temp = rdev->pm.dpm.thermal.max_temp;
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t radeon_hwmon_show_name(struct device *dev, static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
} }
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = { static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr,
NULL NULL
}; };
static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
return attr->mode;
}
static const struct attribute_group hwmon_attrgroup = { static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes, .attrs = hwmon_attributes,
.is_visible = hwmon_attributes_visible,
}; };
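For context, the is_visible hook wired up above is the standard sysfs attribute-group mechanism for hiding individual files at registration time. A reduced sketch of the same pattern follows; the demo_* names, the capability flag and the temperature values are made up for illustration and are not part of the radeon driver:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct demo_chip {
	bool has_crit_limit;	/* hypothetical capability flag */
};

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* index 0 = current temperature, index 1 = critical limit */
	int index = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%d\n", index ? 95000 : 40000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, demo_show_temp, NULL, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	NULL
};

static umode_t demo_attrs_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct demo_chip *chip = dev_get_drvdata(dev);

	/* hide the limit file when the device cannot report one */
	if (!chip->has_crit_limit &&
	    attr == &sensor_dev_attr_temp1_crit.dev_attr.attr)
		return 0;
	return attr->mode;
}

static const struct attribute_group demo_attrgroup = {
	.attrs = demo_attrs,
	.is_visible = demo_attrs_visible,
};

The group would then be registered once with sysfs_create_group(&dev->kobj, &demo_attrgroup); attributes for which is_visible returns 0 simply never appear, which is how the radeon change keeps temp1_crit and temp1_crit_hyst out of sysfs when DPM is not in use.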
static int radeon_hwmon_init(struct radeon_device *rdev) static int radeon_hwmon_init(struct radeon_device *rdev)
@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev); radeon_dpm_post_set_power_state(rdev);
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active)
/* force low perf level for thermal */
radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
else
/* otherwise, enable auto */
radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
}
done: done:
@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
{ {
int ret; int ret;
/* default to balanced state */
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
rdev->pm.default_sclk = rdev->clock.default_sclk; rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk; rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk; rdev->pm.current_sclk = rdev->clock.default_sclk;


@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
); );
TRACE_EVENT(radeon_cs,
TP_PROTO(struct radeon_cs_parser *p),
TP_ARGS(p),
TP_STRUCT__entry(
__field(u32, ring)
__field(u32, dw)
__field(u32, fences)
),
TP_fast_assign(
__entry->ring = p->ring;
__entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
__entry->fences = radeon_fence_count_emitted(
p->rdev, p->ring);
),
TP_printk("ring=%u, dw=%u, fences=%u",
__entry->ring, __entry->dw,
__entry->fences)
);
DECLARE_EVENT_CLASS(radeon_fence_request, DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno), TP_PROTO(struct drm_device *dev, u32 seqno),
@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
TP_ARGS(dev, seqno) TP_ARGS(dev, seqno)
); );
DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno), TP_PROTO(struct drm_device *dev, u32 seqno),


@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
unsigned long flags;
uint32_t r; uint32_t r;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff); WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA); r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff); WREG32(RS480_NB_MC_INDEX, 0xff);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r; return r;
} }
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v)); WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff); WREG32(RS480_NB_MC_INDEX, 0xff);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
} }
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)


@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
unsigned long flags;
u32 r;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1)); S_000070_MC_IND_CITF_ARB0(1));
r = RREG32(R_000074_MC_IND_DATA);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
} }
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v); WREG32(R_000074_MC_IND_DATA, v);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
} }
static void rs600_debugfs(struct radeon_device *rdev) static void rs600_debugfs(struct radeon_device *rdev)


@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
unsigned long flags;
uint32_t r; uint32_t r;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA); r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r; return r;
} }
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1)); S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v); WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F); WREG32(R_000078_MC_INDEX, 0x7F);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
} }
static void rs690_mc_program(struct radeon_device *rdev) static void rs690_mc_program(struct radeon_device *rdev)


@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
radeon_crtc = to_radeon_crtc(crtc); radeon_crtc = to_radeon_crtc(crtc);
pi->crtc_id = radeon_crtc->crtc_id; pi->crtc_id = radeon_crtc->crtc_id;
if (crtc->mode.htotal && crtc->mode.vtotal) if (crtc->mode.htotal && crtc->mode.vtotal)
pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
break; break;
} }
} }
@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
} }
static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
{ {
struct igp_power_info *pi = rs780_get_pi(rdev);
struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
udelay(1); udelay(1);
WREG32_P(FVTHROT_PWM_CTRL_REG0, WREG32_P(FVTHROT_PWM_CTRL_REG0,
STARTING_PWM_HIGHTIME(voltage),
~STARTING_PWM_HIGHTIME_MASK); ~STARTING_PWM_HIGHTIME_MASK);
WREG32_P(FVTHROT_PWM_CTRL_REG0, WREG32_P(FVTHROT_PWM_CTRL_REG0,
@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
} }
static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
{
struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
if (current_state->sclk_low == current_state->sclk_high)
return;
WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
~FORCED_FEEDBACK_DIV_MASK);
WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
~STARTING_FEEDBACK_DIV_MASK);
WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
udelay(100);
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
struct radeon_ps *new_ps, struct radeon_ps *new_ps,
struct radeon_ps *old_ps) struct radeon_ps *old_ps)
@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
if (ret) if (ret)
return ret; return ret;
if ((min_dividers.ref_div != max_dividers.ref_div) ||
(min_dividers.post_div != max_dividers.post_div) ||
(max_dividers.ref_div != current_max_dividers.ref_div) ||
(max_dividers.post_div != current_max_dividers.post_div))
return -EINVAL;

rs780_force_fbdiv(rdev, max_dividers.fb_div);
if (max_dividers.fb_div > min_dividers.fb_div) { if (max_dividers.fb_div > min_dividers.fb_div) {
WREG32_P(FVTHROT_FBDIV_REG0, WREG32_P(FVTHROT_FBDIV_REG0,
@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
(new_state->sclk_low == old_state->sclk_low)) (new_state->sclk_low == old_state->sclk_low))
return; return;
if (new_state->sclk_high == new_state->sclk_low)
return;
rs780_clk_scaling_enable(rdev, true); rs780_clk_scaling_enable(rdev, true);
} }
@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
if (pi->voltage_control) { if (pi->voltage_control) {
rs780_force_voltage(rdev, pi->max_voltage);
mdelay(5); mdelay(5);
} }
@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
} else if (r600_is_uvd_state(rps->class, rps->class2)) {
rps->vclk = RS780_DEFAULT_VCLK_FREQ;
rps->dclk = RS780_DEFAULT_DCLK_FREQ;
} else { } else {
rps->vclk = 0; rps->vclk = 0;
rps->dclk = 0; rps->dclk = 0;
} }
if (r600_is_uvd_state(rps->class, rps->class2)) {
if ((rps->vclk == 0) || (rps->dclk == 0)) {
rps->vclk = RS780_DEFAULT_VCLK_FREQ;
rps->dclk = RS780_DEFAULT_DCLK_FREQ;
}
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps; rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
ps->sclk_high, ps->max_voltage); ps->sclk_high, ps->max_voltage);
} }
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
struct igp_power_info *pi = rs780_get_pi(rdev);
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct igp_ps *ps = rs780_get_ps(rps);
struct atom_clock_dividers dividers;
int ret;
rs780_clk_scaling_enable(rdev, false);
rs780_voltage_scaling_enable(rdev, false);
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (pi->voltage_control)
rs780_force_voltage(rdev, pi->max_voltage);
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
ps->sclk_high, false, &dividers);
if (ret)
return ret;
rs780_force_fbdiv(rdev, dividers.fb_div);
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
ps->sclk_low, false, &dividers);
if (ret)
return ret;
rs780_force_fbdiv(rdev, dividers.fb_div);
if (pi->voltage_control)
rs780_force_voltage(rdev, pi->min_voltage);
} else {
if (pi->voltage_control)
rs780_force_voltage(rdev, pi->max_voltage);
if (ps->sclk_high != ps->sclk_low) {
WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
rs780_clk_scaling_enable(rdev, true);
}
if (pi->voltage_control) {
rs780_voltage_scaling_enable(rdev, true);
rs780_enable_voltage_scaling(rdev, rps);
}
}
rdev->pm.dpm.forced_level = level;
return 0;
}


@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
unsigned long flags;
uint32_t r; uint32_t r;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA); r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0); WREG32(MC_IND_INDEX, 0);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r; return r;
} }
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{ {
unsigned long flags;
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v)); WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0); WREG32(MC_IND_INDEX, 0);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
} }
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)


@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
return 0; return 0;
} }


@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("rv770_dpm_force_performance_level failed\n");
return ret;
}
return 0; return 0;
} }
@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
} else if (r600_is_uvd_state(rps->class, rps->class2)) {
rps->vclk = RV770_DEFAULT_VCLK_FREQ;
rps->dclk = RV770_DEFAULT_DCLK_FREQ;
} else { } else {
rps->vclk = 0; rps->vclk = 0;
rps->dclk = 0; rps->dclk = 0;
} }
if (r600_is_uvd_state(rps->class, rps->class2)) {
if ((rps->vclk == 0) || (rps->dclk == 0)) {
rps->vclk = RV770_DEFAULT_VCLK_FREQ;
rps->dclk = RV770_DEFAULT_DCLK_FREQ;
}
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps; rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)


@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
0x08, 0x72, 0x08, 0x72 0x08, 0x72, 0x08, 0x72
}; };
static int rv770_set_smc_sram_address(struct radeon_device *rdev,
u16 smc_address, u16 limit) u16 smc_address, u16 limit)
{ {
u32 addr; u32 addr;
@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src, u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit) u16 byte_count, u16 limit)
{ {
unsigned long flags;
u32 data, original_data, extra_shift; u32 data, original_data, extra_shift;
u16 addr; u16 addr;
int ret = 0;
if (smc_start_address & 3) if (smc_start_address & 3)
return -EINVAL; return -EINVAL;
@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address; addr = smc_start_address;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) { while (byte_count >= 4) {
/* SMC address space is BE */ /* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = rv770_set_smc_sram_address(rdev, addr, limit); ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
WREG32(SMC_SRAM_DATA, data); WREG32(SMC_SRAM_DATA, data);
@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit); ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
original_data = RREG32(SMC_SRAM_DATA); original_data = RREG32(SMC_SRAM_DATA);
@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit); ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
WREG32(SMC_SRAM_DATA, data); WREG32(SMC_SRAM_DATA, data);
} }
done:
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return ret;
} }
static int rv770_program_interrupt_vectors(struct radeon_device *rdev, static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
{ {
unsigned long flags;
u16 i; u16 i;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
for (i = 0; i < limit; i += 4) { for (i = 0; i < limit; i += 4) {
rv770_set_smc_sram_address(rdev, i, limit); rv770_set_smc_sram_address(rdev, i, limit);
WREG32(SMC_SRAM_DATA, 0); WREG32(SMC_SRAM_DATA, 0);
} }
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
} }
int rv770_load_smc_ucode(struct radeon_device *rdev, int rv770_load_smc_ucode(struct radeon_device *rdev,
@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
int rv770_read_smc_sram_dword(struct radeon_device *rdev, int rv770_read_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 *value, u16 limit) u16 smc_address, u32 *value, u16 limit)
{ {
unsigned long flags;
int ret; int ret;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit); ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
if (ret == 0)
*value = RREG32(SMC_SRAM_DATA);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

return ret;
} }
int rv770_write_smc_sram_dword(struct radeon_device *rdev, int rv770_write_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 value, u16 limit) u16 smc_address, u32 value, u16 limit)
{ {
unsigned long flags;
int ret; int ret;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit); ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
if (ret == 0)
WREG32(SMC_SRAM_DATA, value);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

return ret;
} }


@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
int rv770_set_smc_sram_address(struct radeon_device *rdev,
u16 smc_address, u16 limit);
int rv770_copy_bytes_to_smc(struct radeon_device *rdev, int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src, u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit); u16 byte_count, u16 limit);


@ -852,7 +852,7 @@
#define AFMT_VBI_PACKET_CONTROL 0x7608 #define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2) # define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c #define AFMT_INFOFRAME_CONTROL0 0x760c
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7) # define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10) # define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610 #define AFMT_GENERIC0_7 0x7610


@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t pe, uint64_t pe,
uint64_t addr, unsigned count, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags); uint32_t incr, uint32_t flags);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
static const u32 verde_rlc_save_restore_register_list[] = static const u32 verde_rlc_save_restore_register_list[] =
{ {
@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz; u32 rb_bufsz;
int r; int r;
si_enable_gui_idle_interrupt(rdev, false);
WREG32(CP_SEM_WAIT_TIMER, 0x0); WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
} }
si_enable_gui_idle_interrupt(rdev, true);
return 0; return 0;
} }
@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{ {
u32 tmp; u32 tmp;
if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp); WREG32(RLC_TTOP_D, tmp);
@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
u32 block, bool enable) u32 block, bool enable)
{ {
if (block & RADEON_CG_BLOCK_GFX) { if (block & RADEON_CG_BLOCK_GFX) {
si_enable_gui_idle_interrupt(rdev, false);
/* order matters! */ /* order matters! */
if (enable) { if (enable) {
si_enable_mgcg(rdev, true); si_enable_mgcg(rdev, true);
@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
si_enable_cgcg(rdev, false); si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false); si_enable_mgcg(rdev, false);
} }
si_enable_gui_idle_interrupt(rdev, true);
} }
if (block & RADEON_CG_BLOCK_MC) { if (block & RADEON_CG_BLOCK_MC) {
@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_dma_pg(rdev); si_init_dma_pg(rdev);
} }
si_init_ao_cu_mask(rdev); si_init_ao_cu_mask(rdev);
if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev); si_init_gfx_cgpg(rdev);
} }
si_enable_dma_pg(rdev, true); si_enable_dma_pg(rdev, true);
@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
tmp = RREG32(CP_INT_CNTL_RING0) &
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
WREG32(CP_INT_CNTL_RING1, 0); WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0); WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
int si_irq_set(struct radeon_device *rdev) int si_irq_set(struct radeon_device *rdev)
{ {
u32 cp_int_cntl;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
return 0; return 0;
} }
cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
if (!ASIC_IS_NODCE(rdev)) { if (!ASIC_IS_NODCE(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;


@ -6075,12 +6075,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret; return ret;
} }
ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("si_dpm_force_performance_level failed\n");
return ret;
}
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC | RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA | RADEON_CG_BLOCK_SDMA |


@ -29,8 +29,8 @@
#include "ppsmc.h" #include "ppsmc.h"
#include "radeon_ucode.h" #include "radeon_ucode.h"
static int si_set_smc_sram_address(struct radeon_device *rdev,
u32 smc_address, u32 limit) u32 smc_address, u32 limit)
{ {
if (smc_address & 3) if (smc_address & 3)
return -EINVAL; return -EINVAL;
@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address, u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit) const u8 *src, u32 byte_count, u32 limit)
{ {
unsigned long flags;
int ret = 0;
u32 data, original_data, addr, extra_shift; u32 data, original_data, addr, extra_shift;
if (smc_start_address & 3) if (smc_start_address & 3)
@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address; addr = smc_start_address;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) { while (byte_count >= 4) {
/* SMC address space is BE */ /* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = si_set_smc_sram_address(rdev, addr, limit); ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
WREG32(SMC_IND_DATA_0, data); WREG32(SMC_IND_DATA_0, data);
@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit); ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
original_data = RREG32(SMC_IND_DATA_0); original_data = RREG32(SMC_IND_DATA_0);
@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit); ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret) if (ret)
goto done;
WREG32(SMC_IND_DATA_0, data); WREG32(SMC_IND_DATA_0, data);
} }
return 0;
done:
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return ret;
} }
void si_start_smc(struct radeon_device *rdev) void si_start_smc(struct radeon_device *rdev)
@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{ {
unsigned long flags;
u32 ucode_start_address; u32 ucode_start_address;
u32 ucode_size; u32 ucode_size;
const u8 *src; const u8 *src;
@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL; return -EINVAL;
src = (const u8 *)rdev->smc_fw->data; src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address); WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) { while (ucode_size >= 4) {
@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4; ucode_size -= 4;
} }
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0; return 0;
} }
@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit) u32 *value, u32 limit)
{ {
unsigned long flags;
int ret; int ret;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit); ret = si_set_smc_sram_address(rdev, smc_address, limit);
if (ret) if (ret == 0)
return ret; *value = RREG32(SMC_IND_DATA_0);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
*value = RREG32(SMC_IND_DATA_0); return ret;
return 0;
} }
int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 value, u32 limit) u32 value, u32 limit)
{ {
unsigned long flags;
int ret; int ret;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit); ret = si_set_smc_sram_address(rdev, smc_address, limit);
if (ret) if (ret == 0)
return ret; WREG32(SMC_IND_DATA_0, value);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_DATA_0, value); return ret;
return 0;
} }

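A note on the locking added throughout si_copy_bytes_to_smc(), si_load_smc_ucode() and the SRAM dword helpers above: the SMC is reached through an index/data register pair (SMC_IND_INDEX_0 selects an SRAM address, SMC_IND_DATA_0 then reads or writes it), so every access is a two-step sequence, and the new rdev->smc_idx_lock has to stay held across both steps. A minimal, stand-alone sketch of why follows; the "registers" below are plain variables standing in for the hardware, not radeon code:

#include <stdint.h>
#include <stdio.h>

static uint32_t smc_ind_index;   /* stands in for SMC_IND_INDEX_0 */
static uint32_t smc_sram[64];    /* SRAM words reached via SMC_IND_DATA_0 */

static void wreg_index(uint32_t addr) { smc_ind_index = addr; }
static void wreg_data(uint32_t val)   { smc_sram[smc_ind_index / 4] = val; }
static uint32_t rreg_data(void)       { return smc_sram[smc_ind_index / 4]; }

int main(void)
{
	/*
	 * Step 1 selects the word, step 2 touches it.  If another thread
	 * rewrote smc_ind_index between the two steps, the data access
	 * would land on the wrong word; that is the race the
	 * spin_lock_irqsave(&rdev->smc_idx_lock, ...) regions close.
	 */
	wreg_index(0x10);
	wreg_data(0xdeadbeef);

	wreg_index(0x10);
	printf("0x%08x\n", rreg_data());
	return 0;
}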
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
 	if (pi->enable_dpm)
 		sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
 
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
 	return 0;
 }
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
 	pi->requested_rps.ps_priv = &pi->requested_ps;
 }
 
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+	if (pi->enable_bapm) {
+		trinity_acquire_mutex(rdev);
+		trinity_dpm_bapm_enable(rdev, enable);
+		trinity_release_mutex(rdev);
+	}
+}
+
 int trinity_dpm_enable(struct radeon_device *rdev)
 {
 	struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
 	trinity_program_sclk_dpm(rdev);
 	trinity_start_dpm(rdev);
 	trinity_wait_for_dpm_enabled(rdev);
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_release_mutex(rdev);
 
 	if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
 		trinity_release_mutex(rdev);
 		return;
 	}
+	trinity_dpm_bapm_enable(rdev, false);
 	trinity_disable_clock_power_gating(rdev);
 	sumo_clear_vc(rdev);
 	trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
 	trinity_acquire_mutex(rdev);
 	if (pi->enable_dpm) {
+		if (pi->enable_bapm)
+			trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
 		trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
 		trinity_enable_power_level_0(rdev);
 		trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
 		trinity_force_level_0(rdev);
 		trinity_unforce_levels(rdev);
 		trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
-		rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	}
 	trinity_release_mutex(rdev);
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
+	pi->enable_bapm = true;
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
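The trinity hunks above tie bapm (AMD's bidirectional application power management) to the power source: it is forced off while dpm is being brought up or torn down, and trinity_dpm_set_power_state() turns it back on only when rdev->pm.dpm.ac_power reports AC power. A stand-alone model of that callback shape is sketched below; the struct and wrapper names are illustrative, not the real radeon_asic ones:

#include <stdbool.h>
#include <stdio.h>

struct dpm_funcs {
	void (*enable_bapm)(void *ctx, bool enable);   /* per-ASIC hook */
};

struct device_ctx {
	const struct dpm_funcs *dpm;
	bool ac_power;
};

static void trinity_like_enable_bapm(void *ctx, bool enable)
{
	(void)ctx;
	printf("BAPM %s\n", enable ? "enabled" : "disabled");
}

static const struct dpm_funcs trinity_like_dpm = {
	.enable_bapm = trinity_like_enable_bapm,
};

/* Called on an AC<->DC transition: bapm only stays on while on AC. */
static void handle_power_source_change(struct device_ctx *dev, bool on_ac)
{
	dev->ac_power = on_ac;
	if (dev->dpm->enable_bapm)
		dev->dpm->enable_bapm(dev, on_ac);
}

int main(void)
{
	struct device_ctx dev = { .dpm = &trinity_like_dpm, .ac_power = true };

	handle_power_source_change(&dev, false);   /* unplugged: disable bapm */
	handle_power_source_change(&dev, true);    /* plugged in: re-enable it */
	return 0;
}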
@@ -108,6 +108,7 @@ struct trinity_power_info {
 	bool enable_auto_thermal_throttling;
 	bool enable_dpm;
 	bool enable_sclk_ds;
+	bool enable_bapm;
 	bool uvd_dpm;
 	struct radeon_ps current_rps;
 	struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
 #define TRINITY_AT_DFLT 30
 
 /* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
 int trinity_dpm_config(struct radeon_device *rdev, bool enable);
 int trinity_uvd_dpm_config(struct radeon_device *rdev);
 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
 	return 0;
 }
 
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+	else
+		return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int trinity_dpm_config(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)
 {
 	struct ttm_object_device *tdev = tfile->tdev;
-	struct ttm_base_object *base;
+	struct ttm_base_object *uninitialized_var(base);
 	struct drm_hash_item *hash;
 	int ret;
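The uninitialized_var() annotation above does not change behaviour; it only quiets a gcc "may be used uninitialized" warning that this fix treats as a false positive, since base is assigned before it is ever read on the success path. In kernels of this vintage the macro amounted to a plain self-assignment, roughly as follows (a user-space illustration, not the exact kernel header):

#include <stdio.h>

#define uninitialized_var(x) x = x	/* mirrors the old compiler-header trick */

static int lookup(int key, int *out)
{
	if (key != 42)
		return -1;	/* *out deliberately untouched on failure */
	*out = 1000;
	return 0;
}

int main(void)
{
	int uninitialized_var(value);

	if (lookup(42, &value) == 0)	/* value is only read after a successful lookup */
		printf("%d\n", value);
	return 0;
}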
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 		ttm_tt_unbind(ttm);
 	}
 
-	if (likely(ttm->pages != NULL)) {
+	if (ttm->state == tt_unbound) {
 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 	}
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
 	switch (ret) {
 	case -EAGAIN:
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 		return VM_FAULT_NOPAGE;
@@ -12,11 +12,14 @@
 	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \