Merge tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "A bunch of fixes covering i915, amdgpu, one tegra and some core DRM
  ones. Nothing too strange at this point"

* tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux: (21 commits)
  drm/atomic: Don't potentially reset color_mgmt_changed on successive property updates.
  drm: Protect fb_defio in drivers with CONFIG_KMS_FBDEV_EMULATION
  drm/amdgpu: skip TV/CV in display parsing
  drm/amdgpu: avoid a possible array overflow
  drm/amdgpu: fix lru size grouping v2
  drm/tegra: dsi: Enhance runtime power management
  drm/i915: Fix botched merge that downgrades CSR versions.
  drm/i915/skl: Ensure pipes with changed wms get added to the state
  drm/i915/gen9: Only copy WM results for changed pipes to skl_hw
  drm/i915/skl: Add support for the SAGV, fix underrun hangs
  drm/i915/gen6+: Interpret mailbox error flags
  drm/i915: Reattach comment, complete type specification
  drm/i915: Unconditionally flush any chipset buffers before execbuf
  drm/i915/gen9: Drop invalid WARN() during data rate calculation
  drm/i915/gen9: Initialize intel_state->active_crtcs during WM sanitization (v2)
  drm: Reject page_flip for !DRIVER_MODESET
  drm/amdgpu: fix timeout value check in amd_sched_job_recovery
  drm/amdgpu: fix sdma_v2_4_ring_test_ib
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/radeon: fix radeon_move_blit on 32bit systems
  ...
commit 25d0d91af7
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
     /* custom LRU management */
     struct amdgpu_mman_lru    log2_size[AMDGPU_TTM_LRU_SIZE];
+    /* guard for log2_size array, don't add anything in between */
+    struct amdgpu_mman_lru    guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
                     (le16_to_cpu(path->usConnObjectId) &
                      OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+                /* Skip TV/CV support */
+                if ((le16_to_cpu(path->usDeviceTag) ==
+                     ATOM_DEVICE_TV1_SUPPORT) ||
+                    (le16_to_cpu(path->usDeviceTag) ==
+                     ATOM_DEVICE_CV_SUPPORT))
+                    continue;
+
+                if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+                    DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+                              con_obj_id, le16_to_cpu(path->usDeviceTag));
+                    continue;
+                }
+
                 connector_type =
                     object_connector_convert[con_obj_id];
                 connector_object_id = con_obj_id;
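
Note: the ARRAY_SIZE() check above is the standard kernel idiom for validating an index that comes from external data (here a VBIOS object table) before using it as an array subscript. A minimal standalone sketch of the same idiom (hypothetical table and values, not from this diff):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int object_connector_convert[] = { 10, 11, 12, 13 };

static int lookup_connector(unsigned int con_obj_id)
{
    /* Reject out-of-range ids instead of reading past the array. */
    if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
        fprintf(stderr, "invalid con_obj_id %u\n", con_obj_id);
        return -1;
    }
    return object_connector_convert[con_obj_id];
}

int main(void)
{
    printf("%d %d\n", lookup_connector(2), lookup_connector(200));
    return 0;
}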
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
     adev = amdgpu_get_adev(bo->bdev);
     ring = adev->mman.buffer_funcs_ring;
-    old_start = old_mem->start << PAGE_SHIFT;
-    new_start = new_mem->start << PAGE_SHIFT;
+    old_start = (u64)old_mem->start << PAGE_SHIFT;
+    new_start = (u64)new_mem->start << PAGE_SHIFT;
 
     switch (old_mem->mem_type) {
     case TTM_PL_VRAM:
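
Note: the (u64) casts fix a classic 32-bit truncation. old_mem->start is an unsigned long, so on 32-bit kernels the shift happens in 32 bits and wraps for buffers placed at offsets of 4 GiB or more, before the result is widened for the copy. A standalone demo of the bug class (hypothetical values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Page offset of a buffer sitting 8 GiB into the address space. */
    uint32_t start = 0x00200000;

    uint64_t bad  = start << 12;            /* 32-bit shift: wraps to 0 */
    uint64_t good = (uint64_t)start << 12;  /* 64-bit shift: 0x200000000 */

    printf("bad=%#llx good=%#llx\n",
           (unsigned long long)bad, (unsigned long long)good);
    return 0;
}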
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
     struct list_head *res = lru->lru[tbo->mem.mem_type];
 
     lru->lru[tbo->mem.mem_type] = &tbo->lru;
+    while ((++lru)->lru[tbo->mem.mem_type] == res)
+        lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
     return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
     struct list_head *res = lru->swap_lru;
 
     lru->swap_lru = &tbo->swap;
+    while ((++lru)->swap_lru == res)
+        lru->swap_lru = &tbo->swap;
 
     return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
     }
 
+    for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+        adev->mman.guard.lru[j] = NULL;
+    adev->mman.guard.swap_lru = NULL;
+
     adev->mman.initialized = true;
     r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
                        adev->mc.real_vram_size >> PAGE_SHIFT);
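
Note: the guard member added to struct amdgpu_mman, the two (++lru) walks, and the NULL initialization in amdgpu_ttm_init() implement one pattern: a NULL-initialized sentinel placed directly behind log2_size[] lets the update loops advance without an explicit index bound, because the guard's pointers never compare equal to a live list head. A simplified sketch of the technique (invented types; the real structures live in amdgpu):

#include <stddef.h>

struct list_head;       /* opaque here; only pointer identity matters */

struct lru_entry {
    struct list_head *tail;
};

/*
 * entries[] must be followed by a guard whose .tail is NULL, exactly
 * like amdgpu_mman's "guard" member placed right after log2_size[].
 */
static void lru_move_tail(struct lru_entry *entry, struct list_head *old,
                          struct list_head *new)
{
    entry->tail = new;
    while ((++entry)->tail == old)  /* stops at the NULL guard */
        entry->tail = new;
}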
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
         DRM_ERROR("amdgpu: IB test timed out\n");
         r = -ETIMEDOUT;
         goto err1;
-    } else if (r) {
+    } else if (r < 0) {
         DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
         goto err1;
     }
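
Note: r here comes from a fence wait with the kernel's three-way return convention: negative errno on failure, 0 on timeout, and the remaining jiffies (positive) on success, so the old "else if (r)" branch also fired on success. A standalone model of the convention (stub wait function, not the kernel API):

#include <stdio.h>

/* Stand-in for fence_wait_timeout(): >0 success, 0 timeout, <0 error. */
static long stub_wait_timeout(long timeout) { return timeout / 2; }

int main(void)
{
    long r = stub_wait_timeout(100);

    if (r == 0)
        printf("timed out\n");
    else if (r < 0)       /* "else if (r)" would also catch r > 0 */
        printf("wait failed (%ld)\n", r);
    else
        printf("signaled, %ld ticks to spare\n", r);
    return 0;
}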
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
     spin_lock(&sched->job_list_lock);
     s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                      struct amd_sched_job, node);
-    if (s_job)
+    if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
         schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
     list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                     val,
                     -1,
                     &replaced);
-        state->color_mgmt_changed = replaced;
+        state->color_mgmt_changed |= replaced;
         return ret;
     } else if (property == config->ctm_property) {
         ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                     val,
                     sizeof(struct drm_color_ctm),
                     &replaced);
-        state->color_mgmt_changed = replaced;
+        state->color_mgmt_changed |= replaced;
         return ret;
     } else if (property == config->gamma_lut_property) {
         ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                     val,
                     -1,
                     &replaced);
-        state->color_mgmt_changed = replaced;
+        state->color_mgmt_changed |= replaced;
         return ret;
     } else if (crtc->funcs->atomic_set_property)
         return crtc->funcs->atomic_set_property(crtc, state, property, val);
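
Note: the three "|=" changes share one cause: drm_atomic_crtc_set_property() can run several times against the same state (degamma, CTM and gamma may all arrive in one commit), and "replaced" only reports whether the latest blob changed. Plain assignment let a final no-op update clear a flag set by an earlier one. Standalone demo:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    bool color_mgmt_changed = false;
    bool replaced;

    replaced = true;                 /* degamma LUT replaced */
    color_mgmt_changed |= replaced;

    replaced = false;                /* gamma LUT unchanged */
    color_mgmt_changed |= replaced;  /* "=" here would reset the flag */

    printf("color_mgmt_changed = %d\n", color_mgmt_changed);  /* 1 */
    return 0;
}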
@@ -5404,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
     struct drm_pending_vblank_event *e = NULL;
     int ret = -EINVAL;
 
+    if (!drm_core_check_feature(dev, DRIVER_MODESET))
+        return -EINVAL;
+
     if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
         page_flip->reserved != 0)
         return -EINVAL;
@@ -882,11 +882,12 @@ struct i915_gem_context {
 
     struct i915_ctx_hang_stats hang_stats;
 
-    /* Unique identifier for this context, used by the hw for tracking */
     unsigned long flags;
 #define CONTEXT_NO_ZEROMAP        BIT(0)
 #define CONTEXT_NO_ERROR_CAPTURE  BIT(1)
-    unsigned hw_id;
+
+    /* Unique identifier for this context, used by the hw for tracking */
+    unsigned int hw_id;
     u32 user_handle;
 
     u32 ggtt_alignment;
@@ -1963,6 +1964,13 @@ struct drm_i915_private {
     struct i915_suspend_saved_registers regfile;
     struct vlv_s0ix_state vlv_s0ix_state;
 
+    enum {
+        I915_SKL_SAGV_UNKNOWN = 0,
+        I915_SKL_SAGV_DISABLED,
+        I915_SKL_SAGV_ENABLED,
+        I915_SKL_SAGV_NOT_CONTROLLED
+    } skl_sagv_status;
+
     struct {
         /*
          * Raw watermark latency values:
@@ -3591,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+    wmb();
     if (INTEL_GEN(dev_priv) < 6)
         intel_gtt_chipset_flush();
 }
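
Note: moving the wmb() inside i915_gem_chipset_flush() (paired with the execbuffer hunk below) turns the helper into a single "publish CPU writes" point: the write barrier always runs, and the explicit chipset flush is only added on gens before 6. A hedged sketch of the resulting shape, with stubs so it stands alone (the real symbols are kernel APIs):

/* Stubs standing in for the kernel's wmb() and chipset flush. */
static void wmb_stub(void) { __sync_synchronize(); }  /* full barrier */
static void chipset_flush_hw(void) { /* gen<6 flush register poke */ }

static inline void chipset_flush_sketch(int gen)
{
    wmb_stub();              /* order all prior CPU writes first */
    if (gen < 6)
        chipset_flush_hw();  /* older gens need an explicit flush */
}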
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 {
     const unsigned other_rings = ~intel_engine_flag(req->engine);
     struct i915_vma *vma;
-    uint32_t flush_domains = 0;
-    bool flush_chipset = false;
     int ret;
 
     list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
         }
 
         if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-            flush_chipset |= i915_gem_clflush_object(obj, false);
-
-        flush_domains |= obj->base.write_domain;
+            i915_gem_clflush_object(obj, false);
     }
 
-    if (flush_chipset)
-        i915_gem_chipset_flush(req->engine->i915);
-
-    if (flush_domains & I915_GEM_DOMAIN_GTT)
-        wmb();
+    /* Unconditionally flush any chipset caches (for streaming writes). */
+    i915_gem_chipset_flush(req->engine->i915);
 
     /* Unconditionally invalidate gpu caches and ensure that we do flush
      * any residual writes from the previous batch.
@@ -7145,6 +7145,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX            _MMIO(0x138124)
 #define   GEN6_PCODE_READY            (1<<31)
+#define   GEN6_PCODE_ERROR_MASK       0xFF
+#define     GEN6_PCODE_SUCCESS        0x0
+#define     GEN6_PCODE_ILLEGAL_CMD    0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define     GEN6_PCODE_TIMEOUT        0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define     GEN7_PCODE_TIMEOUT        0x2
+#define     GEN7_PCODE_ILLEGAL_DATA   0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS    0x4
 #define   GEN6_PCODE_READ_RC6VIDS     0x5
 #define     GEN6_ENCODE_RC6_VID(mv)   (((mv) - 245) / 5)
@@ -7166,6 +7175,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
 #define   DISPLAY_IPS_CONTROL         0x19
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define   GEN9_PCODE_SAGV_CONTROL     0x21
+#define     GEN9_SAGV_DISABLE         0x0
+#define     GEN9_SAGV_IS_DISABLED     0x1
+#define     GEN9_SAGV_ENABLE          0x3
 #define GEN6_PCODE_DATA               _MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -41,15 +41,15 @@
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED    CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED    CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED    CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED    CSR_VERSION(1, 7)
 
@@ -13759,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
             intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
             dev_priv->display.modeset_commit_cdclk(state);
 
+        /*
+         * SKL workaround: bspec recommends we disable the SAGV when we
+         * have more then one pipe enabled
+         */
+        if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+            skl_disable_sagv(dev_priv);
+
         intel_modeset_verify_disabled(dev);
     }
 
@@ -13832,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
         intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
     }
 
+    if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+        skl_can_enable_sagv(state))
+        skl_enable_sagv(dev_priv);
+
     drm_atomic_helper_commit_hw_done(state);
 
     if (intel_state->modeset)
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                           struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
 
 #define SKL_DDB_SIZE        896    /* in blocks */
 #define BXT_DDB_SIZE        512
+#define SKL_SAGV_BLOCK_TIME 30     /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
     }
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ *  - <= 1 pipe enabled
+ *  - All planes can enable watermarks for latencies >= SAGV engine block time
+ *  - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+    int ret;
+
+    if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+        dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+        return 0;
+
+    DRM_DEBUG_KMS("Enabling the SAGV\n");
+    mutex_lock(&dev_priv->rps.hw_lock);
+
+    ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                  GEN9_SAGV_ENABLE);
+
+    /* We don't need to wait for the SAGV when enabling */
+    mutex_unlock(&dev_priv->rps.hw_lock);
+
+    /*
+     * Some skl systems, pre-release machines in particular,
+     * don't actually have an SAGV.
+     */
+    if (ret == -ENXIO) {
+        DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+        dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+        return 0;
+    } else if (ret < 0) {
+        DRM_ERROR("Failed to enable the SAGV\n");
+        return ret;
+    }
+
+    dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+    return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+    int ret;
+    uint32_t temp = GEN9_SAGV_DISABLE;
+
+    ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                 &temp);
+    if (ret)
+        return ret;
+    else
+        return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+    int ret, result;
+
+    if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+        dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+        return 0;
+
+    DRM_DEBUG_KMS("Disabling the SAGV\n");
+    mutex_lock(&dev_priv->rps.hw_lock);
+
+    /* bspec says to keep retrying for at least 1 ms */
+    ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+    mutex_unlock(&dev_priv->rps.hw_lock);
+
+    if (ret == -ETIMEDOUT) {
+        DRM_ERROR("Request to disable SAGV timed out\n");
+        return -ETIMEDOUT;
+    }
+
+    /*
+     * Some skl systems, pre-release machines in particular,
+     * don't actually have an SAGV.
+     */
+    if (result == -ENXIO) {
+        DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+        dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+        return 0;
+    } else if (result < 0) {
+        DRM_ERROR("Failed to disable the SAGV\n");
+        return result;
+    }
+
+    dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+    return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+    struct drm_device *dev = state->dev;
+    struct drm_i915_private *dev_priv = to_i915(dev);
+    struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+    struct drm_crtc *crtc;
+    enum pipe pipe;
+    int level, plane;
+
+    /*
+     * SKL workaround: bspec recommends we disable the SAGV when we have
+     * more then one pipe enabled
+     *
+     * If there are no active CRTCs, no additional checks need be performed
+     */
+    if (hweight32(intel_state->active_crtcs) == 0)
+        return true;
+    else if (hweight32(intel_state->active_crtcs) > 1)
+        return false;
+
+    /* Since we're now guaranteed to only have one active CRTC... */
+    pipe = ffs(intel_state->active_crtcs) - 1;
+    crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+    if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+        return false;
+
+    for_each_plane(dev_priv, pipe, plane) {
+        /* Skip this plane if it's not enabled */
+        if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+            continue;
+
+        /* Find the highest enabled wm level for this plane */
+        for (level = ilk_wm_max_level(dev);
+             intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+            { }
+
+        /*
+         * If any of the planes on this pipe don't enable wm levels
+         * that incur memory latencies higher then 30µs we can't enable
+         * the SAGV
+         */
+        if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+            return false;
+    }
+
+    return true;
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                                    const struct intel_crtc_state *cstate,
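
Note: two details of the new functions are worth flagging. skl_sagv_status caches the hardware state so redundant pcode round trips are skipped, and -ENXIO ("no such command") demotes the feature to NOT_CONTROLLED instead of failing. Also, wait_for(result = skl_do_sagv_disable(dev_priv), 1) leans on i915's polling macro: the condition is an assignment, so result retains the value of the last poll when the macro returns. A simplified, tries-based model of that idiom (the real wait_for is time-based, and this uses a GNU C statement expression like the kernel does):

#include <stdio.h>

static int poll_hw(void)
{
    static int polls;
    return ++polls >= 3;    /* "hardware" reports done on the 3rd poll */
}

/* Re-evaluate COND up to TRIES times; 0 on success, -1 on timeout. */
#define wait_for(COND, TRIES) ({               \
    int _t = (TRIES), _ret = -1;               \
    while (_t--)                               \
        if (COND) { _ret = 0; break; }         \
    _ret;                                      \
})

int main(void)
{
    int result = 0;
    int ret = wait_for(result = poll_hw(), 5);

    printf("ret=%d result=%d\n", ret, result);  /* ret=0 result=1 */
    return 0;
}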
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
         total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
     }
 
-    WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
     return total_data_rate;
 }
 
@@ -3912,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
      * pretend that all pipes switched active status so that we'll
      * ensure a full DDB recompute.
      */
-    if (dev_priv->wm.distrust_bios_wm)
+    if (dev_priv->wm.distrust_bios_wm) {
+        ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+                               state->acquire_ctx);
+        if (ret)
+            return ret;
+
         intel_state->active_pipe_changes = ~0;
 
+        /*
+         * We usually only initialize intel_state->active_crtcs if we
+         * we're doing a modeset; make sure this field is always
+         * initialized during the sanitization process that happens
+         * on the first commit too.
+         */
+        if (!intel_state->modeset)
+            intel_state->active_crtcs = dev_priv->active_crtcs;
+    }
+
     /*
      * If the modeset changes which CRTC's are active, we need to
      * recompute the DDB allocation for *all* active pipes, even
@@ -3943,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
         ret = skl_allocate_pipe_ddb(cstate, ddb);
         if (ret)
             return ret;
+
+        ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+        if (ret)
+            return ret;
     }
 
     return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+                     struct skl_wm_values *src,
+                     enum pipe pipe)
+{
+    dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+    memcpy(dst->plane[pipe], src->plane[pipe],
+           sizeof(dst->plane[pipe]));
+    memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+           sizeof(dst->plane_trans[pipe]));
+
+    dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+    memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+           sizeof(dst->ddb.y_plane[pipe]));
+    memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+           sizeof(dst->ddb.plane[pipe]));
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4020,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
     struct drm_device *dev = crtc->dev;
     struct drm_i915_private *dev_priv = to_i915(dev);
     struct skl_wm_values *results = &dev_priv->wm.skl_results;
+    struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
     struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
     struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+    int pipe;
 
     if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
         return;
@@ -4033,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
     skl_write_wm_values(dev_priv, results);
     skl_flush_wm_values(dev_priv, results);
 
-    /* store the new configuration */
-    dev_priv->wm.skl_hw = *results;
+    /*
+     * Store the new configuration (but only for the pipes that have
+     * changed; the other values weren't recomputed).
+     */
+    for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+        skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
     mutex_unlock(&dev_priv->wm.wm_mutex);
 }
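
Note: skl_copy_wm_for_pipe() plus the masked loop replace the old blanket "dev_priv->wm.skl_hw = *results" because only the dirty pipes were recomputed; copying everything dragged stale values for untouched pipes into the cached hardware state. The selective-merge pattern, reduced to its core (hypothetical three-pipe state):

struct wm_state {
    int linetime[3];
};

static void merge_dirty_pipes(struct wm_state *dst,
                              const struct wm_state *src,
                              unsigned int dirty_mask)
{
    int pipe;

    for (pipe = 0; pipe < 3; pipe++)
        if (dirty_mask & (1u << pipe))
            dst->linetime[pipe] = src->linetime[pipe];
    /* pipes outside dirty_mask keep their previous, still-valid values */
}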
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev)
     }
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+    uint32_t flags =
+        I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+    switch (flags) {
+    case GEN6_PCODE_SUCCESS:
+        return 0;
+    case GEN6_PCODE_UNIMPLEMENTED_CMD:
+    case GEN6_PCODE_ILLEGAL_CMD:
+        return -ENXIO;
+    case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+        return -EOVERFLOW;
+    case GEN6_PCODE_TIMEOUT:
+        return -ETIMEDOUT;
+    default:
+        MISSING_CASE(flags);
+        return 0;
+    }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+    uint32_t flags =
+        I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+    switch (flags) {
+    case GEN6_PCODE_SUCCESS:
+        return 0;
+    case GEN6_PCODE_ILLEGAL_CMD:
+        return -ENXIO;
+    case GEN7_PCODE_TIMEOUT:
+        return -ETIMEDOUT;
+    case GEN7_PCODE_ILLEGAL_DATA:
+        return -EINVAL;
+    case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+        return -EOVERFLOW;
+    default:
+        MISSING_CASE(flags);
+        return 0;
+    }
+}
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+    int status;
+
     WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
     /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
     *val = I915_READ_FW(GEN6_PCODE_DATA);
     I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+    if (INTEL_GEN(dev_priv) > 6)
+        status = gen7_check_mailbox_status(dev_priv);
+    else
+        status = gen6_check_mailbox_status(dev_priv);
+
+    if (status) {
+        DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+                         status);
+        return status;
+    }
+
     return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
                             u32 mbox, u32 val)
 {
+    int status;
+
     WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
     /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
     I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+    if (INTEL_GEN(dev_priv) > 6)
+        status = gen7_check_mailbox_status(dev_priv);
+    else
+        status = gen6_check_mailbox_status(dev_priv);
+
+    if (status) {
+        DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+                         status);
+        return status;
+    }
+
     return 0;
 }
 
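
Note: with the two check helpers in place, pcode callers get decoded errnos instead of silent success. A hypothetical caller using the names from this diff (sketch only: dev_priv and the pcode helpers are the kernel's, and the error mapping is the one defined above):

static int sagv_enable_example(struct drm_i915_private *dev_priv)
{
    int ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
                                      GEN9_SAGV_ENABLE);

    if (ret == -ENXIO)   /* GEN6_PCODE_UNIMPLEMENTED_CMD: no SAGV here */
        return 0;        /* treat as "not controlled", not fatal */
    return ret;          /* 0, -ETIMEDOUT, -EINVAL, -EOVERFLOW, ... */
}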
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
     }
 }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 static struct fb_deferred_io qxl_defio = {
     .delay       = QXL_DIRTY_DELAY,
     .deferred_io = drm_fb_helper_deferred_io,
 };
+#endif
 
 static struct fb_ops qxlfb_ops = {
     .owner = THIS_MODULE,
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
         goto out_destroy_fbi;
     }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
     info->fbdefio = &qxl_defio;
     fb_deferred_io_init(info);
+#endif
 
     qdev->fbdev_info = info;
     qdev->fbdev_qfb = &qfbdev->qfb;
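
Note: the qxl and udl hunks apply the same rule: fb_deferred_io, drm_fb_helper_deferred_io() and friends only exist when fbdev emulation is compiled in, so both the qxl_defio object and every use of it must sit behind the same guard or non-fbdev builds fail. The minimal shape of the pattern (a sketch using kernel types, not the full driver code):

#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_deferred_io demo_defio = {
    .delay       = HZ / 2,                   /* flush dirty pages at 2 Hz */
    .deferred_io = drm_fb_helper_deferred_io,
};
#endif

static void demo_enable_defio(struct fb_info *info)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
    info->fbdefio = &demo_defio;
    fb_deferred_io_init(info);
#endif
    /* without the option this function compiles to nothing extra */
}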
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
         if (radeon_crtc->ss.refdiv) {
             radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
             radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-            if (rdev->family >= CHIP_RV770)
+            if (ASIC_IS_AVIVO(rdev) &&
+                rdev->family != CHIP_RS780 &&
+                rdev->family != CHIP_RS880)
                 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
         }
     }
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
     rdev = radeon_get_rdev(bo->bdev);
     ridx = radeon_copy_ring_index(rdev);
-    old_start = old_mem->start << PAGE_SHIFT;
-    new_start = new_mem->start << PAGE_SHIFT;
+    old_start = (u64)old_mem->start << PAGE_SHIFT;
+    new_start = (u64)new_mem->start << PAGE_SHIFT;
 
     switch (old_mem->mem_type) {
     case TTM_PL_VRAM:
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
     .destroy = tegra_output_encoder_destroy,
 };
 
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+    int err;
+
+    if (dsi->slave)
+        tegra_dsi_unprepare(dsi->slave);
+
+    err = tegra_mipi_disable(dsi->mipi);
+    if (err < 0)
+        dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+                err);
+
+    pm_runtime_put(dsi->dev);
+}
+
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
     struct tegra_output *output = encoder_to_output(encoder);
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 
     tegra_dsi_disable(dsi);
 
-    pm_runtime_put(dsi->dev);
+    tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+    int err;
+
+    pm_runtime_get_sync(dsi->dev);
+
+    err = tegra_mipi_enable(dsi->mipi);
+    if (err < 0)
+        dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+                err);
+
+    err = tegra_dsi_pad_calibrate(dsi);
+    if (err < 0)
+        dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+    if (dsi->slave)
+        tegra_dsi_prepare(dsi->slave);
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
     struct tegra_dsi *dsi = to_dsi(output);
     struct tegra_dsi_state *state;
     u32 value;
-    int err;
 
-    pm_runtime_get_sync(dsi->dev);
+    tegra_dsi_prepare(dsi);
 
-    err = tegra_dsi_pad_calibrate(dsi);
-    if (err < 0)
-        dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
-
     state = tegra_dsi_get_state(dsi);
 
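
Note: tegra_dsi_prepare()/tegra_dsi_unprepare() make the enable and disable paths mirror images, including for ganged master/slave DSI: prepare takes the runtime PM reference and turns on MIPI calibration before recursing into the slave, while unprepare recurses first and releases in reverse order. The skeleton of that pairing (invented stub helpers standing in for the runtime PM and MIPI calls):

struct dsi { struct dsi *slave; };

static void pm_get(struct dsi *d)   { /* pm_runtime_get_sync() stand-in */ }
static void pm_put(struct dsi *d)   { /* pm_runtime_put() stand-in */ }
static void mipi_on(struct dsi *d)  { /* tegra_mipi_enable() stand-in */ }
static void mipi_off(struct dsi *d) { /* tegra_mipi_disable() stand-in */ }

static void prepare(struct dsi *d)
{
    pm_get(d);
    mipi_on(d);
    if (d->slave)
        prepare(d->slave);   /* master first, then slave */
}

static void unprepare(struct dsi *d)
{
    if (d->slave)
        unprepare(d->slave); /* mirror image: slave first */
    mipi_off(d);
    pm_put(d);
}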
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
     ufbdev->fb_count++;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
     if (fb_defio && (info->fbdefio == NULL)) {
         /* enable defio at last moment if not disabled by client */
 
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
         info->fbdefio = fbdefio;
         fb_deferred_io_init(info);
     }
+#endif
 
     pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
               info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
 
     ufbdev->fb_count--;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
     if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
         fb_deferred_io_cleanup(info);
         kfree(info->fbdefio);
         info->fbdefio = NULL;
         info->fbops->fb_mmap = udl_fb_mmap;
     }
+#endif
 
     pr_warn("released /dev/fb%d user=%d count=%d\n",
             info->node, user, ufbdev->fb_count);
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
     dev->pads = args.args[0];
     dev->device = device;
 
-    mutex_lock(&dev->mipi->lock);
-
-    if (dev->mipi->usage_count++ == 0) {
-        err = tegra_mipi_power_up(dev->mipi);
-        if (err < 0) {
-            dev_err(dev->mipi->dev,
-                    "failed to power up MIPI bricks: %d\n",
-                    err);
-            return ERR_PTR(err);
-        }
-    }
-
-    mutex_unlock(&dev->mipi->lock);
-
     return dev;
 
 put:
@@ -270,30 +256,43 @@ EXPORT_SYMBOL(tegra_mipi_request);
 
 void tegra_mipi_free(struct tegra_mipi_device *device)
 {
-    int err;
-
-    mutex_lock(&device->mipi->lock);
-
-    if (--device->mipi->usage_count == 0) {
-        err = tegra_mipi_power_down(device->mipi);
-        if (err < 0) {
-            /*
-             * Not much that can be done here, so an error message
-             * will have to do.
-             */
-            dev_err(device->mipi->dev,
-                    "failed to power down MIPI bricks: %d\n",
-                    err);
-        }
-    }
-
-    mutex_unlock(&device->mipi->lock);
-
     platform_device_put(device->pdev);
     kfree(device);
 }
 EXPORT_SYMBOL(tegra_mipi_free);
 
+int tegra_mipi_enable(struct tegra_mipi_device *dev)
+{
+    int err = 0;
+
+    mutex_lock(&dev->mipi->lock);
+
+    if (dev->mipi->usage_count++ == 0)
+        err = tegra_mipi_power_up(dev->mipi);
+
+    mutex_unlock(&dev->mipi->lock);
+
+    return err;
+
+}
+EXPORT_SYMBOL(tegra_mipi_enable);
+
+int tegra_mipi_disable(struct tegra_mipi_device *dev)
+{
+    int err = 0;
+
+    mutex_lock(&dev->mipi->lock);
+
+    if (--dev->mipi->usage_count == 0)
+        err = tegra_mipi_power_down(dev->mipi);
+
+    mutex_unlock(&dev->mipi->lock);
+
+    return err;
+
+}
+EXPORT_SYMBOL(tegra_mipi_disable);
+
 static int tegra_mipi_wait(struct tegra_mipi *mipi)
 {
     unsigned long timeout = jiffies + msecs_to_jiffies(250);
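
Note: the host1x change converts unconditional power-up in tegra_mipi_request() into usage-counted enable/disable, so the MIPI bricks are powered only while at least one consumer is active. The generic shape of the pattern, standalone (the mutex is elided in this demo):

#include <stdio.h>

static int usage_count;

static void power_up(void)   { printf("power up\n"); }
static void power_down(void) { printf("power down\n"); }

static void enable(void)  { if (usage_count++ == 0) power_up(); }
static void disable(void) { if (--usage_count == 0) power_down(); }

int main(void)
{
    enable();  enable();   /* second consumer: no extra power-up */
    disable(); disable();  /* last consumer powers down */
    return 0;
}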
@@ -304,6 +304,8 @@ struct tegra_mipi_device;
 
 struct tegra_mipi_device *tegra_mipi_request(struct device *device);
 void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
 int tegra_mipi_calibrate(struct tegra_mipi_device *device);
 
 #endif