amd, i915, qxl, nouveau, sun4i, atmel, and bridge fixes.

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJbLE+9AAoJEAx081l5xIa+/CwP/3d7GUvlxpN/pBjysbhEc8iK
 xt+SrwX0HYq5RRSzecuru7iV1ZK8DRVtiLlRoUfeszKtznQ4bQlZAxGmlV6gCvML
 jkpjbXwsYOF6ot/va9KMu9CRoqHJSMuAMd9aDAu67aTt4Z+j77mjlT06w28JV0Rl
 seu0EgkKd5qVQl3K4b6cMz0NJZLCPlJFguN07urlCGxBVHd52CHDXVaaeMBs53o+
 p5F1/2zp4WIIzpN17zNC/EyOZwERDGfDEitayMFNGXnKyuAwrCq9T9RbQguCk5Ab
 0c8AIRFB6allcdYVpF6w08OYbo6CZGvaUCKryt4otcP1IhgjwghLiGiTCQnYVz+H
 OEmVLTwz5CudkAiFpkUidig7oBmUoTxslBUZxUw3JB6xH15aClWojSnn/1UvyjvW
 q15tA6EZGADpp4RS4/cglzuty/agEex8xy2Rllo9uZti4jsh/U6F0sSTuoLhHQxq
 r9UDatSIQ2TvWhEuUsX6IAfxxrXwuG2QyHUnP2GZxYV/drOYFfMu5BvJC/uKT6Ru
 RLo/T1Iz213Q6aD5k/dyLq4SFGO5l2FYhhA04r2ovrK/yYCgTXXqsANWQKw1o1ac
 fCGT495SNuxJKIWqqJQq06z8NTqPWf9zoZ+XaNYRDWMEMCO5orqx5s9V6/Lp84V7
 Ja8LhNdztr4fwrxzg7LN
 =i9I6
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Just run of the mill fixes,

  core:
   - regression fix in device unplug

  qxl:
   - regression fix for a might-sleep bug in cursor handling

  nouveau:
   - regression fix in multi-screen cursor handling

  amdgpu:
   - switch off DC by default on Kaveri and older
   - some minor fixes

  i915:
   - some GEM regression fixes
   - doublescan mode fixes

  sun4i:
   - revert of a change that caused a regression

  sii8620 bridge:
   - misc fixes"

* tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/bridge/sii8620: fix display of packed pixel modes in MHL2
  drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate
  drm/amdgpu: Refactor amdgpu_vram_mgr_bo_invisible_size helper
  drm/amdgpu: Update pin_size values before unpinning BO
  drm/amdgpu: All UVD instances share one idle_work handle
  drm/amdgpu: Don't default to DC support for Kaveri and older
  drm/amdgpu: Use kvmalloc_array for allocating VRAM manager nodes array
  drm/amd/pp: Fix uninitialized variable
  drm/i915: Enable provoking vertex fix on Gen9 systems.
  drm/i915: Fix context ban and hang accounting for client
  drm/i915: Turn off g4x DP port in .post_disable()
  drm/i915: Disallow interlaced modes on g4x DP outputs
  drm/i915: Fix PIPESTAT irq ack on i965/g4x
  drm/i915: Allow DBLSCAN user modes with eDP/LVDS/DSI
  drm/i915/execlists: Avoid putting the error pointer
  drm/i915: Apply batch location restrictions before pinning
  drm/nouveau/kms/nv50-: cursors always use core channel vram ctxdma
  Revert "drm/sun4i: Handle DRM_BUS_FLAG_PIXDATA_*EDGE"
  drm/atmel-hlcdc: check stride values in the first plane
  drm/bridge/sii8620: fix HDMI cable connection to dongle
  ...
Committed by Linus Torvalds on 2018-06-22 12:32:09 +09:00 (commit 1cfea546b1).
31 changed files with 407 additions and 336 deletions.


@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
/*
* We have systems in the wild with these ASICs that require
* LVDS and VGA support which is not supported with DC.
*
* Fallback to the non-DC driver here by default so as not to
* cause regressions.
*/
return amdgpu_dc > 0;
case CHIP_HAWAII:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS10:
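
The check above relies on amdgpu_dc being a tri-state module parameter: -1 (auto, the default), 0 (force off), 1 (force on). Using "> 0" flips the meaning of auto to "off" for these older ASICs, while the remaining cases in this switch keep "amdgpu_dc != 0", where auto still means "on". A minimal sketch of the resulting behavior (helper name hypothetical):

/* Sketch only: amdgpu_dc is the amdgpu.dc= module parameter,
 * -1 = auto (default), 0 = force off, 1 = force on. */
static bool dc_default_sketch(enum amd_asic_type asic_type, int amdgpu_dc)
{
	switch (asic_type) {
	case CHIP_KAVERI:		/* and the other pre-Carrizo cases */
		return amdgpu_dc > 0;	/* auto (-1) now disables DC */
	default:
		return amdgpu_dc != 0;	/* auto (-1) keeps DC enabled */
	}
}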


@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
adev->invisible_pin_size += amdgpu_bo_size(bo);
adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
adev->gart_pin_size += amdgpu_bo_size(bo);
}
@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->pin_count--;
if (bo->pin_count)
return 0;
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
adev->vram_pin_size -= amdgpu_bo_size(bo);
adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
adev->gart_pin_size -= amdgpu_bo_size(bo);
}
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
if (unlikely(r))
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
}
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
adev->vram_pin_size -= amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
adev->invisible_pin_size -= amdgpu_bo_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
adev->gart_pin_size -= amdgpu_bo_size(bo);
}
error:
return r;
}
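
Worth spelling out why the size bookkeeping moved above the validate call in the unpin path; a sketch of the presumed rationale:

/* ttm_bo_validate() may move the BO out of VRAM/GTT as part of
 * unpinning, so reading bo->tbo.mem.mem_type after it returns can
 * report the new placement and skip the vram_pin_size /
 * invisible_pin_size / gart_pin_size decrements entirely. */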


@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);


@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
void *ptr;
int i, j;
cancel_delayed_work_sync(&adev->uvd.idle_work);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return;
set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
/**


@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
@ -62,6 +61,7 @@ struct amdgpu_uvd {
bool address_64_bit;
bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
struct delayed_work idle_work;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);


@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
adev->gmc.visible_vram_size : end) - start;
}
/**
* amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
*
* @bo: &amdgpu_bo buffer object (must be in VRAM)
*
* Returns:
* How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
*/
u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_mem_reg *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
u64 usage = 0;
if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
return 0;
if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return amdgpu_bo_size(bo);
while (nodes && pages) {
usage += nodes->size << PAGE_SHIFT;
usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
pages -= nodes->size;
++nodes;
}
return usage;
}
/**
* amdgpu_vram_mgr_new - allocate new ranges
*
@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
GFP_KERNEL | __GFP_ZERO);
if (!nodes)
return -ENOMEM;
@ -190,7 +223,7 @@ error:
drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock);
kfree(nodes);
kvfree(nodes);
return r == -ENOSPC ? 0 : r;
}
@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
kfree(mem->mm_node);
kvfree(mem->mm_node);
mem->mm_node = NULL;
}
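
One more note on this file: the kcalloc() to kvmalloc_array() change above is not cosmetic. A sketch of the reasoning:

/* kcalloc() requires physically contiguous memory.  A large BO is
 * split into pages_per_node-sized drm_mm nodes, so the node array
 * itself can grow to hundreds of kilobytes and start failing under
 * memory fragmentation.  kvmalloc_array() falls back to vmalloc()
 * for such sizes, which is why the matching frees become kvfree() -
 * it releases either backing correctly. */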


@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
int result;
int result = 0;
uint32_t num_se = 0;
uint32_t count, data;


@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
return ret;
}
if (desc->layout.xstride && desc->layout.pstride) {
if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
int ret;
ret = drm_plane_create_rotation_property(&plane->base,

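That one-token change is a genuine fix: xstride and pstride are fixed-size arrays embedded in the layout descriptor, so the old condition tested their addresses, which are never NULL. A sketch of the assumed descriptor shape (struct name and bound are illustrative, not the real ones):

struct layout_sketch {
	int xstride[4];	/* per-plane x-stride register, 0 = absent */
	int pstride[4];	/* per-plane pixel-stride register, 0 = absent */
};

/* old: if (desc->layout.xstride && desc->layout.pstride)       - always true
 * new: if (desc->layout.xstride[0] && desc->layout.pstride[0]) - checks the
 *      first plane's actual register offsets */
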

@ -36,8 +36,11 @@
#define SII8620_BURST_BUF_LEN 288
#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
#define MHL1_MAX_LCLK 225000
#define MHL3_MAX_LCLK 600000
#define MHL1_MAX_PCLK 75000
#define MHL1_MAX_PCLK_PP_MODE 150000
#define MHL3_MAX_PCLK 200000
#define MHL3_MAX_PCLK_PP_MODE 300000
enum sii8620_mode {
CM_DISCONNECTED,
@ -80,6 +83,9 @@ struct sii8620 {
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
bool feature_complete;
bool devcap_read;
bool sink_detected;
struct edid *edid;
unsigned int gen2_write_burst:1;
enum sii8620_mt_state mt_state;
@ -476,7 +482,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
}
}
static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
static void sii8620_identify_sink(struct sii8620 *ctx)
{
static const char * const sink_str[] = {
[SINK_NONE] = "NONE",
@ -487,7 +493,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
char sink_name[20];
struct device *dev = ctx->dev;
if (ret < 0)
if (!ctx->sink_detected || !ctx->devcap_read)
return;
sii8620_fetch_edid(ctx);
@ -496,6 +502,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
sii8620_mhl_disconnected(ctx);
return;
}
sii8620_set_upstream_edid(ctx);
if (drm_detect_hdmi_monitor(ctx->edid))
ctx->sink_type = SINK_HDMI;
@ -508,53 +515,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
sink_str[ctx->sink_type], sink_name);
}
static void sii8620_hsic_init(struct sii8620 *ctx)
{
if (!sii8620_is_mhl3(ctx))
return;
sii8620_write(ctx, REG_FCGC,
BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
sii8620_setbits(ctx, REG_HRXCTRL3,
BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
sii8620_write_seq_static(ctx,
REG_TDMLLCTL, 0,
REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
REG_HRXINTL, 0xff,
REG_HRXINTH, 0xff,
REG_TTXINTL, 0xff,
REG_TTXINTH, 0xff,
REG_TRXINTL, 0xff,
REG_TRXINTH, 0xff,
REG_HTXINTL, 0xff,
REG_HTXINTH, 0xff,
REG_FCINTR0, 0xff,
REG_FCINTR1, 0xff,
REG_FCINTR2, 0xff,
REG_FCINTR3, 0xff,
REG_FCINTR4, 0xff,
REG_FCINTR5, 0xff,
REG_FCINTR6, 0xff,
REG_FCINTR7, 0xff
);
}
static void sii8620_edid_read(struct sii8620 *ctx, int ret)
{
if (ret < 0)
return;
sii8620_set_upstream_edid(ctx);
sii8620_hsic_init(ctx);
sii8620_enable_hpd(ctx);
}
static void sii8620_mr_devcap(struct sii8620 *ctx)
{
u8 dcap[MHL_DCAP_SIZE];
@ -570,6 +530,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
ctx->devcap_read = true;
sii8620_identify_sink(ctx);
}
static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@ -807,6 +769,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
static void sii8620_fetch_edid(struct sii8620 *ctx)
{
u8 lm_ddc, ddc_cmd, int3, cbus;
unsigned long timeout;
int fetched, i;
int edid_len = EDID_LENGTH;
u8 *edid;
@ -856,23 +819,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
);
do {
int3 = sii8620_readb(ctx, REG_INTR3);
int3 = 0;
timeout = jiffies + msecs_to_jiffies(200);
for (;;) {
cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
if (int3 & BIT_DDC_CMD_DONE)
break;
if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
kfree(edid);
edid = NULL;
goto end;
}
if (int3 & BIT_DDC_CMD_DONE) {
if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
>= FETCH_SIZE)
break;
} else {
int3 = sii8620_readb(ctx, REG_INTR3);
}
if (time_is_before_jiffies(timeout)) {
ctx->error = -ETIMEDOUT;
dev_err(ctx->dev, "timeout during EDID read\n");
kfree(edid);
edid = NULL;
goto end;
}
} while (1);
sii8620_readb(ctx, REG_DDC_STATUS);
while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
usleep_range(10, 20);
}
sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
if (fetched + FETCH_SIZE == EDID_LENGTH) {
@ -971,8 +942,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret)
return ret;
usleep_range(10000, 20000);
return clk_prepare_enable(ctx->clk_xtal);
ret = clk_prepare_enable(ctx->clk_xtal);
if (ret)
return ret;
msleep(100);
gpiod_set_value(ctx->gpio_reset, 0);
msleep(100);
return 0;
}
static int sii8620_hw_off(struct sii8620 *ctx)
@ -982,17 +962,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
static void sii8620_hw_reset(struct sii8620 *ctx)
{
usleep_range(10000, 20000);
gpiod_set_value(ctx->gpio_reset, 0);
usleep_range(5000, 20000);
gpiod_set_value(ctx->gpio_reset, 1);
usleep_range(10000, 20000);
gpiod_set_value(ctx->gpio_reset, 0);
msleep(300);
}
static void sii8620_cbus_reset(struct sii8620 *ctx)
{
sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@ -1048,20 +1017,11 @@ static void sii8620_stop_video(struct sii8620 *ctx)
static void sii8620_set_format(struct sii8620 *ctx)
{
u8 out_fmt;
if (sii8620_is_mhl3(ctx)) {
sii8620_setbits(ctx, REG_M3_P0CTRL,
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
if (ctx->use_packed_pixel)
sii8620_write_seq_static(ctx,
REG_VID_MODE, BIT_VID_MODE_M1080P,
REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
REG_MHLTX_CTL6, 0x60
);
else
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
@ -1069,15 +1029,9 @@ static void sii8620_set_format(struct sii8620 *ctx)
);
}
if (ctx->use_packed_pixel)
out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
BIT_TPI_OUTPUT_CSCMODE709;
else
out_fmt = VAL_TPI_FORMAT(RGB, FULL);
sii8620_write_seq(ctx,
REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
REG_TPI_OUTPUT, out_fmt,
REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
);
}
@ -1216,7 +1170,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
if (clk < clk_spec[i].max_clk)
break;
@ -1534,6 +1488,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
);
}
static void sii8620_hpd_unplugged(struct sii8620 *ctx)
{
sii8620_disable_hpd(ctx);
ctx->sink_type = SINK_NONE;
ctx->sink_detected = false;
ctx->feature_complete = false;
kfree(ctx->edid);
ctx->edid = NULL;
}
static void sii8620_disconnect(struct sii8620 *ctx)
{
sii8620_disable_gen2_write_burst(ctx);
@ -1561,7 +1525,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
REG_MHL_DP_CTL6, 0x2A,
REG_MHL_DP_CTL7, 0x03
);
sii8620_disable_hpd(ctx);
sii8620_hpd_unplugged(ctx);
sii8620_write_seq_static(ctx,
REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
REG_MHL_COC_CTL1, 0x07,
@ -1609,10 +1573,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
memset(ctx->xstat, 0, sizeof(ctx->xstat));
memset(ctx->devcap, 0, sizeof(ctx->devcap));
memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
ctx->devcap_read = false;
ctx->cbus_status = 0;
ctx->sink_type = SINK_NONE;
kfree(ctx->edid);
ctx->edid = NULL;
sii8620_mt_cleanup(ctx);
}
@ -1703,9 +1665,6 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL
| MHL_DST_LM_PATH_ENABLED);
if (!sii8620_is_mhl3(ctx))
sii8620_mt_read_devcap(ctx, false);
sii8620_mt_set_cont(ctx, sii8620_sink_detected);
} else {
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL);
@ -1722,9 +1681,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
MHL_DST_CONN_DCAP_RDY) {
sii8620_status_dcap_ready(ctx);
if (!sii8620_is_mhl3(ctx))
sii8620_mt_read_devcap(ctx, false);
}
if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
sii8620_status_changed_path(ctx);
}
@ -1808,8 +1772,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
}
if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
sii8620_send_features(ctx);
if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
sii8620_edid_read(ctx, 0);
if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
ctx->feature_complete = true;
if (ctx->edid)
sii8620_enable_hpd(ctx);
}
}
static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@ -1884,6 +1851,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
sii8620_msc_mr_write_stat(ctx);
if (stat & BIT_CBUS_HPD_CHG) {
if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
ctx->sink_detected = true;
sii8620_identify_sink(ctx);
} else {
sii8620_hpd_unplugged(ctx);
}
}
if (stat & BIT_CBUS_MSC_MR_SET_INT)
sii8620_msc_mr_set_int(ctx);
@ -1931,14 +1907,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
ctx->mt_state = MT_STATE_DONE;
}
static void sii8620_scdt_high(struct sii8620 *ctx)
{
sii8620_write_seq_static(ctx,
REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
);
}
static void sii8620_irq_scdt(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_INTR5);
@ -1946,53 +1914,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
if (stat & BIT_INTR_SCDT_CHANGE) {
u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
if (ctx->sink_type == SINK_HDMI)
/* enable infoframe interrupt */
sii8620_scdt_high(ctx);
else
sii8620_start_video(ctx);
}
if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
sii8620_start_video(ctx);
}
sii8620_write(ctx, REG_INTR5, stat);
}
static void sii8620_new_vsi(struct sii8620 *ctx)
{
u8 vsif[11];
sii8620_write(ctx, REG_RX_HDMI_CTRL2,
VAL_RX_HDMI_CTRL2_DEFVAL |
BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
ARRAY_SIZE(vsif));
}
static void sii8620_new_avi(struct sii8620 *ctx)
{
sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
ARRAY_SIZE(ctx->avif));
}
static void sii8620_irq_infr(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_INTR8)
& (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
sii8620_write(ctx, REG_INTR8, stat);
if (stat & BIT_CEA_NEW_VSI)
sii8620_new_vsi(ctx);
if (stat & BIT_CEA_NEW_AVI)
sii8620_new_avi(ctx);
if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
sii8620_start_video(ctx);
}
static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
{
if (ret < 0)
@ -2043,11 +1971,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
if (stat & BIT_DDC_CMD_DONE) {
sii8620_write(ctx, REG_INTR3_MASK, 0);
if (sii8620_is_mhl3(ctx))
if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
MHL_INT_RC_FEAT_REQ);
else
sii8620_edid_read(ctx, 0);
sii8620_enable_hpd(ctx);
}
sii8620_write(ctx, REG_INTR3, stat);
}
@ -2074,7 +2002,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
};
struct sii8620 *ctx = data;
u8 stats[LEN_FAST_INTR_STAT];
@ -2112,7 +2039,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
dev_err(dev, "Error powering on, %d.\n", ret);
return;
}
sii8620_hw_reset(ctx);
sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
ret = sii8620_clear_error(ctx);
@ -2268,17 +2194,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
rc_unregister_device(ctx->rc_dev);
}
static int sii8620_is_packing_required(struct sii8620 *ctx,
const struct drm_display_mode *mode)
{
int max_pclk, max_pclk_pp_mode;
if (sii8620_is_mhl3(ctx)) {
max_pclk = MHL3_MAX_PCLK;
max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
} else {
max_pclk = MHL1_MAX_PCLK;
max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
}
if (mode->clock < max_pclk)
return 0;
else if (mode->clock < max_pclk_pp_mode)
return 1;
else
return -1;
}
static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
int pack_required = sii8620_is_packing_required(ctx, mode);
bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
MHL_DCAP_VID_LINK_PPIXEL;
unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
MHL1_MAX_LCLK;
max_pclk /= can_pack ? 2 : 3;
return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
switch (pack_required) {
case 0:
return MODE_OK;
case 1:
return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
default:
return MODE_CLOCK_HIGH;
}
}
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@ -2286,43 +2238,16 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
int max_lclk;
bool ret = true;
mutex_lock(&ctx->lock);
max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
if (max_lclk > 3 * adjusted_mode->clock) {
ctx->use_packed_pixel = 0;
goto end;
}
if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
max_lclk > 2 * adjusted_mode->clock) {
ctx->use_packed_pixel = 1;
goto end;
}
ret = false;
end:
if (ret) {
u8 vic = drm_match_cea_mode(adjusted_mode);
ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
ctx->video_code = drm_match_cea_mode(adjusted_mode);
ctx->pixel_clock = adjusted_mode->clock;
if (!vic) {
union hdmi_infoframe frm;
u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
/* FIXME: We need the connector here */
drm_hdmi_vendor_infoframe_from_display_mode(
&frm.vendor.hdmi, NULL, adjusted_mode);
vic = frm.vendor.hdmi.vic;
if (vic >= ARRAY_SIZE(mhl_vic))
vic = 0;
vic = mhl_vic[vic];
}
ctx->video_code = vic;
ctx->pixel_clock = adjusted_mode->clock;
}
mutex_unlock(&ctx->lock);
return ret;
return true;
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {

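Taking the sii8620 changes together, the new packing decision is easy to check by hand. A worked example against the MHL1 limits defined above (clocks in kHz; the mode names are only for illustration):

/* sii8620_is_packing_required() on an MHL1 link:
 *   640x480@60    25175 -> 0   24bpp RGB fits under MHL1_MAX_PCLK (75000)
 *   1080p@60     148500 -> 1   needs packed pixel, still under
 *                              MHL1_MAX_PCLK_PP_MODE (150000)
 *   4K@30        297000 -> -1  over both limits
 *
 * sii8620_mode_valid() then maps 0 -> MODE_OK, 1 -> MODE_OK only if the
 * sink's DCAP advertises MHL_DCAP_VID_LINK_PPIXEL, and -1 (or an
 * unpackable sink) -> MODE_CLOCK_HIGH. */
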

@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
*/
void drm_dev_unplug(struct drm_device *dev)
{
drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
if (dev->open_count == 0)
drm_dev_put(dev);
mutex_unlock(&drm_global_mutex);
/*
* After synchronizing any critical read section is guaranteed to see
* the new value of ->unplugged, and any critical section which might
@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
*/
dev->unplugged = true;
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
if (dev->open_count == 0)
drm_dev_put(dev);
mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
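
The reordering works together with the read side: code touching the device wraps itself in the SRCU section that synchronize_srcu() waits for, so setting dev->unplugged first guarantees no reader can race with the teardown below it. A minimal sketch of that read side using the real drm_dev_enter()/drm_dev_exit() API (the ioctl itself is hypothetical):

int hypothetical_driver_ioctl(struct drm_device *dev)
{
	int idx;

	if (!drm_dev_enter(dev, &idx))	/* false once ->unplugged is set */
		return -ENODEV;

	/* device resources are safe to use here: drm_dev_unplug()
	 * cannot get past synchronize_srcu() until we exit */

	drm_dev_exit(idx);
	return 0;
}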


@ -340,14 +340,21 @@ struct drm_i915_file_private {
unsigned int bsd_engine;
/* Client can have a maximum of 3 contexts banned before
* it is denied of creating new contexts. As one context
* ban needs 4 consecutive hangs, and more if there is
* progress in between, this is a last resort stop gap measure
* to limit the badly behaving clients access to gpu.
/*
* Every context ban increments per client ban score. Also
* hangs in short succession increments ban score. If ban threshold
* is reached, client is considered banned and submitting more work
* will fail. This is a stop gap measure to limit the badly behaving
* clients access to gpu. Note that unbannable contexts never increment
* the client ban score.
*/
#define I915_MAX_CLIENT_CONTEXT_BANS 3
atomic_t context_bans;
#define I915_CLIENT_SCORE_HANG_FAST 1
#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN 3
#define I915_CLIENT_SCORE_BANNED 9
/** ban_score: Accumulated score of all ctx bans and fast hangs. */
atomic_t ban_score;
unsigned long hang_timestamp;
};
/* Interface history:

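A worked example of the new thresholds defined above: a banned context adds 3 (I915_CLIENT_SCORE_CONTEXT_BAN), a hang within 60 seconds of the client's previous one adds 1 (I915_CLIENT_SCORE_HANG_FAST), and the client is refused new contexts at a score of 9 (I915_CLIENT_SCORE_BANNED):

/*   3 context bans:          3 * 3      = 9  -> client banned
 *   2 bans + 3 fast hangs:   2 * 3 + 3  = 9  -> client banned
 *   2 bans + slow hangs:     2 * 3      = 6  -> still under threshold */
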

@ -2933,32 +2933,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
const struct i915_gem_context *ctx)
{
unsigned int score;
unsigned long prev_hang;
if (i915_gem_context_is_banned(ctx))
score = I915_CLIENT_SCORE_CONTEXT_BAN;
else
score = 0;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
score += I915_CLIENT_SCORE_HANG_FAST;
if (score) {
atomic_add(score, &file_priv->ban_score);
DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
ctx->name, score,
atomic_read(&file_priv->ban_score));
}
}
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
bool banned;
unsigned int score;
bool banned, bannable;
atomic_inc(&ctx->guilty_count);
banned = false;
if (i915_gem_context_is_bannable(ctx)) {
unsigned int score;
bannable = i915_gem_context_is_bannable(ctx);
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
score = atomic_add_return(CONTEXT_SCORE_GUILTY,
&ctx->ban_score);
banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
ctx->name, atomic_read(&ctx->guilty_count),
score, yesno(banned && bannable));
DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
ctx->name, score, yesno(banned));
}
if (!banned)
/* Cool contexts don't accumulate client ban score */
if (!bannable)
return;
i915_gem_context_set_banned(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
atomic_inc(&ctx->file_priv->context_bans);
DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
ctx->name, atomic_read(&ctx->file_priv->context_bans));
}
if (banned)
i915_gem_context_set_banned(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv))
i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@ -5736,6 +5758,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)


@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,


@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
}
static int
eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
eb->flags[i] = entry->flags;
vma->exec_flags = &eb->flags[i];
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
* to negative relocation deltas. Usually that works out ok since the
* relocate address is still positive, except when the batch is placed
* very low in the GTT. Ensure this doesn't happen.
*
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
if (i == batch_idx) {
if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
eb->batch = vma;
}
err = 0;
if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *obj;
unsigned int i;
unsigned int i, batch;
int err;
if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
lut->handle = handle;
add_vma:
err = eb_add_vma(eb, i, vma);
err = eb_add_vma(eb, i, batch, vma);
if (unlikely(err))
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
/* take note of the batch buffer before we might reorder the lists */
i = eb_batch_index(eb);
eb->batch = eb->vma[i];
GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
* to negative relocation deltas. Usually that works out ok since the
* relocate address is still positive, except when the batch is placed
* very low in the GTT. Ensure this doesn't happen.
*
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);

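The reason batch_idx is now threaded into eb_add_vma() is ordering; a sketch of the reasoning:

/* eb_add_vma() attempts eb_pin_vma() immediately, so the batch
 * buffer's placement restrictions (__EXEC_OBJECT_NEEDS_BIAS to keep
 * it out of the low GTT, EXEC_OBJECT_NEEDS_FENCE when the relocation
 * cache needs one) must already be in eb->flags[i] at that first pin
 * attempt.  The old code, removed above, patched the flags in after
 * the lookup loop - by which point the batch could already be pinned
 * at an address the restrictions were meant to forbid. */
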

@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
/*
* Clear the PIPE*STAT regs before the IIR
*
* Toggle the enable bits to make sure we get an
* edge in the ISR pipe event bit if we don't clear
* all the enabled status bits. Otherwise the edge
* triggered IIR on i965/g4x wouldn't notice that
* an interrupt is still pending.
*/
if (pipe_stats[pipe])
I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
if (pipe_stats[pipe]) {
I915_WRITE(reg, pipe_stats[pipe]);
I915_WRITE(reg, enable_mask);
}
}
spin_unlock(&dev_priv->irq_lock);
}
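
For context, a sketch of the PIPESTAT register split that the two-write sequence relies on:

/* PIPESTAT (i965/g4x, sketch): bits 31:16 are the status-interrupt
 * enables, bits 15:0 the write-1-to-clear status bits.
 *
 *   I915_WRITE(reg, pipe_stats[pipe]);  - acks the latched status and,
 *       since pipe_stats carries no enable bits, drops all enables low;
 *   I915_WRITE(reg, enable_mask);       - raises the enables again, so
 *       the edge-triggered IIR sees a fresh edge even while an enabled
 *       status condition is still asserted. */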


@ -2425,12 +2425,17 @@ enum i915_power_well_id {
#define _3D_CHICKEN _MMIO(0x2084)
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 _MMIO(0x208c)
#define FF_SLICE_CHICKEN _MMIO(0x2088)
#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)


@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
return true;
}
@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->has_pch_encoder = true;
return true;
@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->has_pch_encoder = true;


@ -14469,12 +14469,22 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
/*
* Can't reject DBLSCAN here because Xorg ddxen can add piles
* of DBLSCAN modes to the output's mode list when they detect
* the scaling mode property on the connector. And they don't
* ask the kernel to validate those modes in any way until
* modeset time at which point the client gets a protocol error.
* So in order to not upset those clients we silently ignore the
* DBLSCAN flag on such connectors. For other connectors we will
* reject modes with the DBLSCAN flag in encoder->compute_config().
* And we always reject DBLSCAN modes in connector->mode_valid()
* as we never want such modes on the connector's mode list.
*/
if (mode->vscan > 1)
return MODE_NO_VSCAN;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->flags & DRM_MODE_FLAG_HSKEW)
return MODE_H_ILLEGAL;

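The same two-hook pattern is applied to intel_crt above and to intel_dp, intel_dp_mst, intel_dsi, intel_dvo, intel_hdmi, intel_lvds, intel_sdvo and intel_tv in the hunks that follow; schematically:

/* connector->mode_valid(): keep DBLSCAN off the mode list entirely */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
	return MODE_NO_DBLESCAN;

/* encoder->compute_config(): catch user modes that bypassed the list
 * (the Xorg case tolerated above) and fail the modeset cleanly */
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
	return false;
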

@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
if (HAS_GMCH_DISPLAY(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return false;
@ -2782,16 +2788,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
static void g4x_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
intel_dp_link_down(encoder, old_crtc_state);
}
static void ilk_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void ilk_post_disable_dp(struct intel_encoder *encoder,
static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
/*
* Bspec does not list a specific disable sequence for g4x DP.
* Follow the ilk+ sequence (disable pipe before the port) for
* g4x DP as it does not suffer from underruns like the normal
* g4x modeset sequence (disable pipe after the port).
*/
intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
if (!HAS_GMCH_DISPLAY(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
} else if (INTEL_GEN(dev_priv) >= 5) {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->disable = ilk_disable_dp;
intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->disable = g4x_disable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
intel_dig_port->dp.output_reg = output_reg;


@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (!intel_dp)
return MODE_ERROR;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);


@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_KMS("\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;


@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
if (fixed_mode) {
@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
return true;
}


@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
clock = mode->clock;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)


@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
*batch++ = MI_LOAD_REGISTER_IMM(3);
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
*batch++ = MI_LOAD_REGISTER_IMM(1);
*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
*batch++ = _MASKED_BIT_DISABLE(
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
/* BSpec: 11391 */
*batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
*batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
/* BSpec: 11299 */
*batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
*batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
*batch++ = MI_NOOP;
/* WaClearSlmSpaceAtContextSwitch:kbl */
@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
if (IS_ERR(ctx_obj)) {
ret = PTR_ERR(ctx_obj);
goto error_deref_obj;
}
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {

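The _MASKED_BIT_ENABLE() writes above work because these chicken registers are masked registers: the high 16 bits of each write select which of the low 16 bits take effect, so individual bits can be set without a read-modify-write. Simplified from the real i915_reg.h helpers:

#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)
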

@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;


@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
/*
* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;


@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (!tv_mode)
return false;
pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has it's own notion of sync and other mode flags, so clear them. */
pipe_config->base.adjusted_mode.flags = 0;
adjusted_mode->flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want


@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
wndw->immd = func;
wndw->ctxdma.parent = &disp->core->chan.base.user;
wndw->ctxdma.parent = NULL;
return 0;
}


@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
if (ret)
return ret;
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
nouveau_bo_unpin(fb->nvbo);
return PTR_ERR(ctxdma);
if (wndw->ctxdma.parent) {
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
nouveau_bo_unpin(fb->nvbo);
return PTR_ERR(ctxdma);
}
asyw->image.handle[0] = ctxdma->object.handle;
}
asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
asyw->image.handle[0] = ctxdma->object.handle;
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {


@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct qxl_cursor_cmd *cmd;
struct qxl_cursor *cursor;
struct drm_gem_object *obj;
struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
int ret;
void *user_ptr;
int size = 64*64*4;
@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cursor_bo, 0);
cmd->type = QXL_CURSOR_SET;
qxl_bo_unref(&qcrtc->cursor_bo);
old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = cursor_bo;
cursor_bo = NULL;
} else {
@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
if (old_cursor_bo)
qxl_bo_unref(&old_cursor_bo);
qxl_bo_unref(&cursor_bo);
return;

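A sketch of the presumed rationale for deferring the unref (this is the "might sleep" regression from the summary above; inferred from the reordering, not from the commit text):

/* The cursor command is filled in while the release is mapped with an
 * atomic kmap, so dropping the last reference to the previous cursor
 * BO at that point could sleep in atomic context.  Stashing it in
 * old_cursor_bo and unreferencing only after
 * qxl_release_fence_buffer_objects() moves the potentially sleeping
 * free out of that window. */
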

@ -17,7 +17,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
struct drm_panel *panel = tcon->panel;
struct drm_connector *connector = panel->connector;
struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
/*
* On A20 and similar SoCs, the only way to achieve Positive Edge
* (Rising Edge), is setting dclk clock phase to 2/3(240°).
* By default TCON works in Negative Edge(Falling Edge),
* this is why phase is set to 0 in that case.
* Unfortunately there's no way to logically invert dclk through
* IO_POL register.
* The only acceptable way to work, triple checked with scope,
* is using clock phase set to 0° for Negative Edge and set to 240°
* for Positive Edge.
* On A33 and similar SoCs there would be a 90° phase option,
* but it divides also dclk by 2.
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
clk_set_phase(tcon->dclk, 240);
if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);