Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: Do not force 1024x768 modes on unknown connectors
  drm/kms: Add a module parameter to disable polling
  drm/radeon/kms: fix tv-out on avivo asics
  drm/radeon/kms/evergreen: fix gpu hangs in userspace accel code
  drm/nv50: initialize ramht_refs list for faked 0 channel
  drm/nouveau: Don't take struct_mutex around the pushbuf IOCTL.
  drm/nouveau: Take fence spinlock before reading the last sequence.
  drm/radeon/kms/evergreen: work around bad data in some i2c tables
  drm/radeon/kms: properly set crtc high base on r7xx
  drm/radeon/kms: fix tv module parameter
  drm/radeon/kms: force legacy pll algo for RV515 LVDS
  drm/radeon/kms: remove useless clock code
  drm/radeon/kms: fix a regression on r7xx AGP due to the HDP flush fix
  drm/radeon/kms: use tracked values for sclk and mclk
commit 6300d6d755
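Note on the drm_kms_helper hunks below: the new `poll' parameter is registered with mode 0600, so (assuming drm_crtc_helper.c is built into drm_kms_helper.ko, as it normally is) it should be writable at runtime through /sys/module/drm_kms_helper/parameters/poll, in addition to passing drm_kms_helper.poll=0 on the kernel command line. For illustration only, here is a minimal, self-contained sketch of the same pattern (a boolean module parameter gating a delayed-work poller); every name in it is hypothetical and it is not the helper code itself:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* illustrative module parameter, writable at runtime because of 0600 */
static bool example_poll = true;
module_param_named(poll, example_poll, bool, 0600);

static struct delayed_work example_work;

static void example_poll_execute(struct work_struct *work)
{
	if (!example_poll)
		return;		/* polling disabled at load time or via sysfs */

	/* ... probe outputs here ... */

	schedule_delayed_work(&example_work, msecs_to_jiffies(10000));
}

static int __init example_init(void)
{
	INIT_DELAYED_WORK(&example_work, example_poll_execute);
	if (example_poll)
		schedule_delayed_work(&example_work, msecs_to_jiffies(10000));
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_work);
}
module_exit(example_exit);
MODULE_LICENSE("GPL");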
@@ -34,6 +34,9 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
 static void drm_mode_validate_flag(struct drm_connector *connector,
				    int flags)
 {
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
			connector->status = connector_status_disconnected;
		if (connector->funcs->force)
			connector->funcs->force(connector);
-	} else
+	} else {
		connector->status = connector->funcs->detect(connector);
+		drm_helper_hpd_irq_event(dev);
+	}
 
	if (connector->status == connector_status_disconnected) {
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
	}
 
	count = (*connector_funcs->get_modes)(connector);
-	if (!count) {
+	if (count == 0 && connector->status == connector_status_connected)
		count = drm_add_modes_noedid(connector, 1024, 768);
-		if (!count)
-			return 0;
-	}
+	if (count == 0)
+		goto prune;
 
	drm_mode_connector_list_update(connector);
 
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
	enum drm_connector_status old_status, status;
	bool repoll = false, changed = false;
 
+	if (!drm_kms_helper_poll)
+		return;
+
	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
	bool poll = false;
	struct drm_connector *connector;
 
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->polled)
			poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
	if (!dev->mode_config.poll_enabled)
		return;
+
	/* kill timer and schedule immediate execution, this doesn't block */
	cancel_delayed_work(&dev->mode_config.output_poll_work);
-	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+	if (drm_kms_helper_poll)
+		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
	struct nouveau_fence *fence;
	uint32_t sequence;
 
+	spin_lock(&chan->fence.lock);
+
	if (USE_REFCNT)
		sequence = nvchan_rd32(chan, 0x48);
	else
		sequence = atomic_read(&chan->fence.last_sequence_irq);
 
	if (chan->fence.sequence_ack == sequence)
-		return;
+		goto out;
	chan->fence.sequence_ack = sequence;
 
-	spin_lock(&chan->fence.lock);
	list_for_each_safe(entry, tmp, &chan->fence.pending) {
		fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
		if (sequence == chan->fence.sequence_ack)
			break;
	}
+out:
	spin_unlock(&chan->fence.lock);
 }
 
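Note on the nouveau_fence_update() change above: the fix takes chan->fence.lock before sampling the sequence number and replaces the early return with a goto to a single unlock label. A userspace analogue of that pattern (illustration only, not nouveau code; all names are made up):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t sequence_ack;
static uint32_t last_sequence_irq;	/* normally written by another thread */

static void fence_update(void)
{
	uint32_t sequence;

	pthread_mutex_lock(&fence_lock);

	sequence = last_sequence_irq;	/* read only while holding the lock */
	if (sequence == sequence_ack)
		goto out;		/* nothing new, still unlock below */
	sequence_ack = sequence;

	/* ... retire pending fences up to sequence_ack here ... */
out:
	pthread_mutex_unlock(&fence_lock);
}

int main(void)
{
	last_sequence_irq = 1;
	fence_update();
	printf("acked %u\n", (unsigned)sequence_ack);
	return 0;
}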
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
-		drm_gem_object_unreference(nvbo->gem);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
 }
 
@@ -300,7 +300,7 @@ retry:
			validate_fini(op, NULL);
			if (ret == -EAGAIN)
				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-			drm_gem_object_unreference(gem);
+			drm_gem_object_unreference_unlocked(gem);
			if (ret) {
				NV_ERROR(dev, "fail reserve\n");
				return ret;
@@ -616,8 +616,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
		return PTR_ERR(bo);
	}
 
-	mutex_lock(&dev->struct_mutex);
-
	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, that they
	 * happen on the kernel channel, which will in turn cause a sync
@@ -731,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
	validate_fini(&op, fence);
	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
	kfree(bo);
	kfree(push);
 
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
	chan->file_priv = (struct drm_file *)-2;
	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
 
+	INIT_LIST_HEAD(&chan->ramht_refs);
+
	/* Channel's PRAMIN object + heap */
	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
				      NULL, &chan->ramin);
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
	args.usV_SyncWidth =
		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
 
+	args.ucOverscanRight = radeon_crtc->h_border;
+	args.ucOverscanLeft = radeon_crtc->h_border;
+	args.ucOverscanBottom = radeon_crtc->v_border;
+	args.ucOverscanTop = radeon_crtc->v_border;
+
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		misc |= ATOM_VSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -534,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
			pll->algo = PLL_ALGO_LEGACY;
			pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
		}
+		/* There is some evidence (often anecdotal) that RV515 LVDS
+		 * (on some boards at least) prefers the legacy algo. I'm not
+		 * sure whether this should handled generically or on a
+		 * case-by-case quirk basis. Both algos should work fine in the
+		 * majority of cases.
+		 */
+		if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+		    (rdev->family == CHIP_RV515)) {
+			/* allow the user to overrride just in case */
+			if (radeon_new_pll == 1)
+				pll->algo = PLL_ALGO_NEW;
+			else
+				pll->algo = PLL_ALGO_LEGACY;
+		}
	} else {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
			pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -1056,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
	if (rdev->family >= CHIP_RV770) {
		if (radeon_crtc->crtc_id) {
-			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
		} else {
-			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
		}
	}
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1197,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	bool is_tvcv = false;
 
	/* TODO color tiling */
-
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		/* find tv std */
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->active_device &
+			    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				is_tvcv = true;
+		}
+	}
	atombios_disable_ss(crtc);
	/* always set DCPLL */
@@ -1207,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
	atombios_crtc_set_pll(crtc, adjusted_mode);
	atombios_enable_ss(crtc);
 
-	if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_DCE4(rdev))
		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
-	else {
+	else if (ASIC_IS_AVIVO(rdev)) {
+		if (is_tvcv)
+			atombios_crtc_set_timing(crtc, adjusted_mode);
+		else
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	} else {
		atombios_crtc_set_timing(crtc, adjusted_mode);
		if (radeon_crtc->crtc_id == 0)
			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
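Note on the r7xx crtc-base change above: the *_SURFACE_ADDRESS_HIGH registers were previously hard-coded to 0, which presumably breaks once fb_location carries bits above 32 (a surface placed beyond 4 GB in the MC address space). A trivial standalone illustration of the low/high dword split (the helpers below are hypothetical stand-ins, not the kernel's upper_32_bits macro):

#include <stdint.h>
#include <stdio.h>

/* hand-rolled equivalents of the kernel's upper/lower 32-bit helpers */
static uint32_t upper_32_bits_ex(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits_ex(uint64_t v) { return (uint32_t)v; }

int main(void)
{
	uint64_t fb_location = 0x1234567890ULL;	/* hypothetical MC address */

	printf("high: 0x%08x low: 0x%08x\n",
	       (unsigned)upper_32_bits_ex(fb_location),
	       (unsigned)lower_32_bits_ex(fb_location));
	return 0;
}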
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
	return 0;
 }
 
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+	int r;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(rdev, 0x1);
+	radeon_ring_write(rdev, 0x0);
+	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(rdev, 0);
+	radeon_ring_write(rdev, 0);
+	radeon_ring_unlock_commit(rdev);
+
+	cp_me = 0xff;
+	WREG32(CP_ME_CNTL, cp_me);
+
+	r = radeon_ring_lock(rdev, 4);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	/* init some VGT regs */
+	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(rdev, 0xe);
+	radeon_ring_write(rdev, 0x10);
+	radeon_ring_unlock_commit(rdev);
+
+	return 0;
+}
+
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
	u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
-	r600_cp_start(rdev);
+	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 
	r = evergreen_startup(rdev);
	if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
	evergreen_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
-	radeon_clocks_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
-	if (rdev->family >= CHIP_CEDAR) {
-		radeon_ring_write(rdev, 0x0);
-		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-	} else if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 
	r = r600_startup(rdev);
	if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
-	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
-		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;
 
		WREG32(HDP_DEBUG1, 0);
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
 
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+	struct radeon_bo *robj;
+	volatile uint32_t *ptr;
+};
 
 /*
  * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	struct r600_blit r600_blit;
+	struct r700_vram_scratch vram_scratch;
	int msi_enabled; /* msi enabled */
	struct r600_ih ih;	/* r6/700 interrupt ring */
	struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
 extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
	return 0;
 }
 
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-	int r;
-
-	r = radeon_static_clocks_init(rdev->ddev);
-	if (r) {
-		return r;
-	}
-	DRM_INFO("Clocks initialized !\n");
-	return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
	for (i = 0; i < num_indices; i++) {
		gpio = &i2c_info->asGPIO_Info[i];
 
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
		if (gpio->sucI2cId.ucAccess == id) {
			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
	for (i = 0; i < num_indices; i++) {
		gpio = &i2c_info->asGPIO_Info[i];
		i2c.valid = false;
+
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
		i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
		i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
		i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
	mpll->max_feedback_div = 0xff;
	mpll->best_vco = 0;
 
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
 }
 
 /* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
	}
 }
 
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-
-	/* XXX make sure engine is idle */
-
-	if (rdev->family < CHIP_RS600) {
-		tmp = RREG32_PLL(RADEON_SCLK_CNTL);
-		if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
-			tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
-		if ((rdev->family == CHIP_RV250)
-		    || (rdev->family == CHIP_RV280))
-			tmp |=
-			    RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
-		if ((rdev->family == CHIP_RV350)
-		    || (rdev->family == CHIP_RV380))
-			tmp |= R300_SCLK_FORCE_VAP;
-		if (rdev->family == CHIP_R420)
-			tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
-		WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-	} else if (rdev->family < CHIP_R600) {
-		tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
-		tmp |= AVIVO_CP_FORCEON;
-		WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
-		tmp |= AVIVO_E2_FORCEON;
-		WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
-		tmp |= AVIVO_IDCT_FORCEON;
-		WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
-	}
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	/* XXX make sure engine is idle */
-
-	if (radeon_dynclks != -1) {
-		if (radeon_dynclks) {
-			if (rdev->asic->set_clock_gating)
-				radeon_set_clock_gating(rdev, 1);
-		}
-	}
-	radeon_apply_clock_quirks(rdev);
-	return 0;
-}
@@ -1051,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
	uint32_t subpixel_order = SubPixelNone;
	bool shared_ddc = false;
 
-	/* fixme - tv/cv/din */
	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
		return;
 
+	/* if the user selected tv=0 don't try and add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
	/* see if we already added it */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
@@ -1209,19 +1215,17 @@ radeon_add_atom_connector(struct drm_device *dev,
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_Composite:
	case DRM_MODE_CONNECTOR_9PinDIN:
-		if (radeon_tv == 1) {
-			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-			radeon_connector->dac_load_detect = true;
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.load_detect_property,
-						      1);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.tv_std_property,
-						      radeon_atombios_get_tv_info(rdev));
-			/* no HPD on analog connectors */
-			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-		}
+		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+		radeon_connector->dac_load_detect = true;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.tv_std_property,
+					      radeon_atombios_get_tv_info(rdev));
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1272,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
	struct radeon_connector *radeon_connector;
	uint32_t subpixel_order = SubPixelNone;
 
-	/* fixme - tv/cv/din */
	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
		return;
 
+	/* if the user selected tv=0 don't try and add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
	/* see if we already added it */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		radeon_connector = to_radeon_connector(connector);
@@ -1347,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_Composite:
	case DRM_MODE_CONNECTOR_9PinDIN:
-		if (radeon_tv == 1) {
-			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-			radeon_connector->dac_load_detect = true;
-			/* RS400,RC410,RS480 chipset seems to report a lot
-			 * of false positive on load detect, we haven't yet
-			 * found a way to make load detect reliable on those
-			 * chipset, thus just disable it for TV.
-			 */
-			if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
-				radeon_connector->dac_load_detect = false;
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.load_detect_property,
-						      radeon_connector->dac_load_detect);
-			drm_connector_attach_property(&radeon_connector->base,
-						      rdev->mode_info.tv_std_property,
-						      radeon_combios_get_tv_info(rdev));
-			/* no HPD on analog connectors */
-			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-		}
+		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+		radeon_connector->dac_load_detect = true;
+		/* RS400,RC410,RS480 chipset seems to report a lot
+		 * of false positive on load detect, we haven't yet
+		 * found a way to make load detect reliable on those
+		 * chipset, thus just disable it for TV.
+		 */
+		if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+			radeon_connector->dac_load_detect = false;
+		drm_connector_attach_property(&radeon_connector->base,
					      rdev->mode_info.load_detect_property,
					      radeon_connector->dac_load_detect);
+		drm_connector_attach_property(&radeon_connector->base,
					      rdev->mode_info.tv_std_property,
					      radeon_combios_get_tv_info(rdev));
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
 void radeon_update_bandwidth_info(struct radeon_device *rdev)
 {
	fixed20_12 a;
-	u32 sclk, mclk;
+	u32 sclk = rdev->pm.current_sclk;
+	u32 mclk = rdev->pm.current_mclk;
+
+	/* sclk/mclk in Mhz */
+	a.full = dfixed_const(100);
+	rdev->pm.sclk.full = dfixed_const(sclk);
+	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+	rdev->pm.mclk.full = dfixed_const(mclk);
+	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
	if (rdev->flags & RADEON_IS_IGP) {
-		sclk = radeon_get_engine_clock(rdev);
-		mclk = rdev->clock.default_mclk;
-
-		a.full = dfixed_const(100);
-		rdev->pm.sclk.full = dfixed_const(sclk);
-		rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-		rdev->pm.mclk.full = dfixed_const(mclk);
-		rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
-
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
-	} else {
-		sclk = radeon_get_engine_clock(rdev);
-		mclk = radeon_get_memory_clock(rdev);
-
-		a.full = dfixed_const(100);
-		rdev->pm.sclk.full = dfixed_const(sclk);
-		rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-		rdev->pm.mclk.full = dfixed_const(mclk);
-		rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
	}
 }
 
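Note on radeon_update_bandwidth_info() above: as the divide-by-100 suggests, the tracked sclk/mclk values are kept in 10 kHz units and converted to MHz as 20.12 fixed-point numbers. A standalone illustration of that conversion (the helpers below are hand-rolled for the example and are not the kernel's dfixed_* implementation):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

/* integer -> 20.12 fixed point */
static fixed20_12 fx_const(uint32_t v)
{
	fixed20_12 f = { v << 12 };
	return f;
}

/* 20.12 fixed-point division */
static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
	fixed20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
	return r;
}

int main(void)
{
	uint32_t sclk_10khz = 72500;			/* 725.00 MHz */
	fixed20_12 hundred = fx_const(100);
	fixed20_12 sclk_mhz = fx_div(fx_const(sclk_10khz), hundred);

	printf("sclk = %u.%02u MHz\n",
	       (unsigned)(sclk_mhz.full >> 12),
	       (unsigned)((sclk_mhz.full & 0xfff) * 100 / 4096));
	return 0;
}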
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
 
 static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
 {
-	u32 sclk = radeon_get_engine_clock(rdev);
+	u32 sclk = rdev->pm.current_sclk;
	u32 prescale = 0;
	u32 nm;
	u8 n, m, loop;
@@ -600,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
 void radeon_enc_destroy(struct drm_encoder *encoder);
 void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
 void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode);
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 }
 
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+	u64 gpu_addr;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+				     true, RADEON_GEM_DOMAIN_VRAM,
+				     &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+			   (void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
	u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
		if (r)
			return r;
	}
+	r = rv770_vram_scratch_init(rdev);
+	if (r)
+		return r;
	rv770_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 
	r = rv770_startup(rdev);
	if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	rv770_pcie_gart_fini(rdev);
+	rv770_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
-	radeon_clocks_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);