drm: msm, i915, amdgpu, imx and sync_file fixes
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJZ4VPpAAoJEAx081l5xIa+ndIP/ReK6ch2ZDJmlI9DrFszF9WM
FvetkaIZ8mL8f5Q9wYfSkczZHY4mXlrfYQLzVdQLDoe7vwKcs3PWvvJ13tlLWHZH
PDdBcOPHkreVJ2wdlfhHm3gnoeN4RUUrLGuzHE/uwQO5vvr0LemnCRwRq8XLGDs5
SZwAOM+yDfsbxrXZUF377BTsOpMuaicTAuqxZObRiPpA9JWDMRC+3bHtHXIrmo6+
Jtx/Ih1S9OFeQnDNSFQJbM0GD/VN1It6VvsGfL+/xYvt2ypzbQl33TozKiJX80uk
VIyTek8aA6jJ7YFm9g++FT83kIvjqg/l5RBe8qL6lP9nnjhYqYP8jYHdSuYRGvl1
FFk6t0SsLxhzZ6QrUZ2GUEmZM72MB1MWnjiSWkH2jh9XDsYH5v3jlWPCG9ATID/m
iv3Lxxf4c+FSnEXGlZOewIPXcllqDx78j6oEuFmiKeTBP2YdHkfGPHRNVCszcEp9
7lrD4JdRtB/xUTwGpzmZn1dA042VYu1kHdbLFPl+LOE9aSyRrMApQftiYVrwBIkB
OHWukwBPJQmiijQGid+AcChz39MPb6+ANiFPM2DndxJlkryi5XtYIUOowZYBHzqf
tAMG2NqN2EDCKBdgoCbK/c8yk/UPKYUvLC1dC6lwbs6t2wsbftNcP1RWRrACjBsU
ORuoth2IVcBCU5Md91V4
=yrkB
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.14-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Couple of the arm people seem to wake up so this has imx and msm
  fixes, along with a bunch of i915 stable bounds fixes and an amdgpu
  regression fix.

  All seems pretty okay for now"

* tag 'drm-fixes-for-v4.14-rc5' of git://people.freedesktop.org/~airlied/linux:
  drm/msm: fix _NO_IMPLICIT fencing case
  drm/msm: fix error path cleanup
  drm/msm/mdp5: Remove extra pm_runtime_put call in mdp5_crtc_cursor_set()
  drm/msm/dsi: Use correct pm_runtime_put variant during host_init
  drm/msm: fix return value check in _msm_gem_kernel_new()
  drm/msm: use proper memory barriers for updating tail/head
  drm/msm/mdp5: add missing max size for 8x74 v1
  drm/amdgpu: fix placement flags in amdgpu_ttm_bind
  drm/i915/bios: parse DDI ports also for CHV for HDMI DDC pin and DP AUX channel
  gpu: ipu-v3: pre: implement workaround for ERR009624
  gpu: ipu-v3: prg: wait for double buffers to be filled on channel startup
  gpu: ipu-v3: Allow channel burst locking on i.MX6 only
  drm/i915: Read timings from the correct transcoder in intel_crtc_mode_get()
  drm/i915: Order two completing nop_submit_request
  drm/i915: Silence compiler warning for hsw_power_well_enable()
  drm/i915: Use crtc_state_is_legacy_gamma in intel_color_check
  drm/i915/edp: Increase the T12 delay quirk to 1300ms
  drm/i915/edp: Get the Panel Power Off timestamp after panel is off
  sync_file: Return consistent status in SYNC_IOC_FILE_INFO
  drm/atomic: Unref duplicated drm_atomic_state in drm_atomic_helper_resume()
commit 9aa0d2dde6
@@ -383,7 +383,7 @@ err_put_fd:
         return err;
 }
 
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
                                  struct sync_fence_info *info)
 {
         strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
                 test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
                 ktime_to_ns(fence->timestamp) :
                 ktime_set(0, 0);
+
+        return info->status;
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
          * sync_fence_info and return the actual number of fences on
          * info->num_fences.
          */
-        if (!info.num_fences)
+        if (!info.num_fences) {
+                info.status = dma_fence_is_signaled(sync_file->fence);
                 goto no_fences;
+        } else {
+                info.status = 1;
+        }
 
         if (info.num_fences < num_fences)
                 return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
         if (!fence_info)
                 return -ENOMEM;
 
-        for (i = 0; i < num_fences; i++)
-                sync_fill_fence_info(fences[i], &fence_info[i]);
+        for (i = 0; i < num_fences; i++) {
+                int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+                info.status = info.status <= 0 ? info.status : status;
+        }
 
         if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
                          size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 
 no_fences:
         sync_file_get_name(sync_file, info.name, sizeof(info.name));
-        info.status = dma_fence_is_signaled(sync_file->fence);
         info.num_fences = num_fences;
 
         if (copy_to_user((void __user *)arg, &info, sizeof(info)))

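For reference, the status convention above follows dma_fence: a negative value is an error, 0 means the fence is still active, and 1 means it has signaled. A minimal standalone sketch (plain C, not the kernel code) of the aggregation rule this patch introduces:

/*
 * Illustrative only: the sync_file is reported as signaled (1) only when
 * every backing fence is signaled; an error (< 0) or still-active fence (0)
 * takes precedence and is kept once seen.
 */
static int aggregate_status(const int *fence_status, int num_fences)
{
        int status = 1; /* the zero-fence case is handled separately above */
        int i;

        for (i = 0; i < num_fences; i++) {
                int s = fence_status[i];

                /* keep the first non-positive status, otherwise stay at 1 */
                status = status <= 0 ? status : s;
        }

        return status;
}

Starting from 1 and latching the first non-positive value is what makes a single unsignaled or errored fence determine the status reported for the whole sync_file.
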
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
         placement.busy_placement = &placements;
         placements.fpfn = 0;
         placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+        placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
 
         r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
         if (unlikely(r))

@@ -2960,6 +2960,7 @@ out:
                 drm_modeset_backoff(&ctx);
         }
 
+        drm_atomic_state_put(state);
         drm_modeset_drop_locks(&ctx);
         drm_modeset_acquire_fini(&ctx);
 
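For context, drm_atomic_state objects are reference counted, and the state duplicated by drm_atomic_helper_suspend() carries a reference that has to be dropped once the resume commit is done; that is the put added above. A hedged sketch of the suspend/resume pairing after this fix (hypothetical driver hooks, error handling trimmed):

#include <linux/err.h>
#include <drm/drm_atomic_helper.h>

static int example_driver_suspend(struct drm_device *drm,
                                  struct drm_atomic_state **saved)
{
        /* the duplicated state returned here holds one reference */
        *saved = drm_atomic_helper_suspend(drm);
        return PTR_ERR_OR_ZERO(*saved);
}

static int example_driver_resume(struct drm_device *drm,
                                 struct drm_atomic_state *saved)
{
        /* with this fix the helper drops the duplicated state's reference
         * itself, so the caller must not put it again */
        return drm_atomic_helper_resume(drm, saved);
}
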
@@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+        unsigned long flags;
+
         GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
         dma_fence_set_error(&request->fence, -EIO);
-        i915_gem_request_submit(request);
+
+        spin_lock_irqsave(&request->engine->timeline->lock, flags);
+        __i915_gem_request_submit(request);
         intel_engine_init_global_seqno(request->engine, request->global_seqno);
+        spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
 
 static void engine_set_wedged(struct intel_engine_cs *engine)

@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
 {
         enum port port;
 
-        if (!HAS_DDI(dev_priv))
+        if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                 return;
 
         if (!dev_priv->vbt.child_dev_num)

@@ -74,7 +74,7 @@
 #define I9XX_CSC_COEFF_1_0              \
         ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
 
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
 {
         return !state->degamma_lut &&
                 !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
         }
 
         mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
-        if (!crtc_state_is_legacy(state)) {
+        if (!crtc_state_is_legacy_gamma(state)) {
                 mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
                         (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
         }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
         struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
         enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
 
-        if (crtc_state_is_legacy(state)) {
+        if (crtc_state_is_legacy_gamma(state)) {
                 haswell_load_luts(state);
                 return;
         }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
 
         glk_load_degamma_lut(state);
 
-        if (crtc_state_is_legacy(state)) {
+        if (crtc_state_is_legacy_gamma(state)) {
                 haswell_load_luts(state);
                 return;
         }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
         uint32_t i, lut_size;
         uint32_t word0, word1;
 
-        if (crtc_state_is_legacy(state)) {
+        if (crtc_state_is_legacy_gamma(state)) {
                 /* Turn off degamma/gamma on CGM block. */
                 I915_WRITE(CGM_PIPE_MODE(pipe),
                            (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
                 return 0;
 
         /*
-         * We also allow no degamma lut and a gamma lut at the legacy
+         * We also allow no degamma lut/ctm and a gamma lut at the legacy
          * size (256 entries).
          */
-        if (!crtc_state->degamma_lut &&
-            crtc_state->gamma_lut &&
-            crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+        if (crtc_state_is_legacy_gamma(crtc_state))
                 return 0;
 
         return -EINVAL;

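The first hunk above truncates the renamed helper's body. Going by the open-coded condition that the intel_color_check() hunk removes, it presumably amounts to the following reconstruction (not the verbatim kernel source):

static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
        /* no degamma LUT, no CTM, and only a legacy-sized (256 entry)
         * gamma LUT -- reconstruction based on the removed check above */
        return !state->degamma_lut &&
                !state->ctm &&
                state->gamma_lut &&
                state->gamma_lut->length == LEGACY_LUT_LENGTH;
}
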
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
         struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+        enum transcoder cpu_transcoder;
         struct drm_display_mode *mode;
         struct intel_crtc_state *pipe_config;
-        int htot = I915_READ(HTOTAL(cpu_transcoder));
-        int hsync = I915_READ(HSYNC(cpu_transcoder));
-        int vtot = I915_READ(VTOTAL(cpu_transcoder));
-        int vsync = I915_READ(VSYNC(cpu_transcoder));
+        u32 htot, hsync, vtot, vsync;
         enum pipe pipe = intel_crtc->pipe;
 
         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
         i9xx_crtc_clock_get(intel_crtc, pipe_config);
 
         mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+        cpu_transcoder = pipe_config->cpu_transcoder;
+        htot = I915_READ(HTOTAL(cpu_transcoder));
+        hsync = I915_READ(HSYNC(cpu_transcoder));
+        vtot = I915_READ(VTOTAL(cpu_transcoder));
+        vsync = I915_READ(VSYNC(cpu_transcoder));
+
         mode->hdisplay = (htot & 0xffff) + 1;
         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
         mode->hsync_start = (hsync & 0xffff) + 1;

@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
         I915_WRITE(pp_ctrl_reg, pp);
         POSTING_READ(pp_ctrl_reg);
 
-        intel_dp->panel_power_off_time = ktime_get_boottime();
         wait_panel_off(intel_dp);
+        intel_dp->panel_power_off_time = ktime_get_boottime();
 
         /* We got a reference when we enabled the VDD. */
         intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
          * seems sufficient to avoid this problem.
          */
         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-                vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+                vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
                 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
                               vbt.t11_t12);
         }

@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
         enum i915_power_well_id id = power_well->id;
         bool wait_fuses = power_well->hsw.has_fuses;
-        enum skl_power_gate pg;
+        enum skl_power_gate uninitialized_var(pg);
         u32 val;
 
         if (wait_fuses) {

@@ -248,7 +248,7 @@ disable_clks:
         clk_disable_unprepare(ahb_clk);
 disable_gdsc:
         regulator_disable(gdsc_reg);
-        pm_runtime_put_autosuspend(dev);
+        pm_runtime_put_sync(dev);
 put_clk:
         clk_put(ahb_clk);
 put_gdsc:

@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
                              .caps = MDP_LM_CAP_WB },
                 },
                 .nb_stages = 5,
+                .max_width = 2048,
+                .max_height = 0xFFFF,
         },
         .dspp = {
                 .count = 3,

@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
         spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
-        pm_runtime_put_autosuspend(&pdev->dev);
-
 set_cursor:
         ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
         if (ret) {

@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
         struct dma_fence *fence;
         int i, ret;
 
-        if (!exclusive) {
-                /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
-                 * which makes this a slightly strange place to call it.  OTOH this
-                 * is a convenient can-fail point to hook it in.  (And similar to
-                 * how etnaviv and nouveau handle this.)
-                 */
-                ret = reservation_object_reserve_shared(msm_obj->resv);
-                if (ret)
-                        return ret;
-        }
-
         fobj = reservation_object_get_list(msm_obj->resv);
         if (!fobj || (fobj->shared_count == 0)) {
                 fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
         }
 
         vaddr = msm_gem_get_vaddr(obj);
-        if (!vaddr) {
+        if (IS_ERR(vaddr)) {
                 msm_gem_put_iova(obj, aspace);
                 drm_gem_object_unreference(obj);
-                return ERR_PTR(-ENOMEM);
+                return ERR_CAST(vaddr);
         }
 
         if (bo)

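The _msm_gem_kernel_new() fix matters because msm_gem_get_vaddr() reports failure as an ERR_PTR-encoded errno rather than NULL, so the old NULL check could never fire and the real error was masked as -ENOMEM; ERR_CAST() simply forwards the encoded error with a different pointer type. A generic sketch of the idiom (hypothetical example functions, not msm code):

#include <linux/err.h>

static char example_storage[64];

static void *example_get_buffer(bool fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM); /* failure is an encoded errno, not NULL */
        return example_storage;
}

static int example_caller(void)
{
        void *vaddr = example_get_buffer(false);

        /* a NULL check here would never trigger on failure */
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr); /* propagate the original errno */

        return 0;
}
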
@@ -221,7 +221,7 @@ fail:
         return ret;
 }
 
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 {
         int i, ret = 0;
 
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
                 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
+                if (!write) {
+                        /* NOTE: _reserve_shared() must happen before
+                         * _add_shared_fence(), which makes this a slightly
+                         * strange place to call it.  OTOH this is a
+                         * convenient can-fail point to hook it in.
+                         */
+                        ret = reservation_object_reserve_shared(msm_obj->resv);
+                        if (ret)
+                                return ret;
+                }
+
+                if (no_implicit)
+                        continue;
+
                 ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
                 if (ret)
                         break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
         if (ret)
                 goto out;
 
-        if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
-                ret = submit_fence_sync(submit);
-                if (ret)
-                        goto out;
-        }
+        ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+        if (ret)
+                goto out;
 
         ret = submit_pin_objects(submit);
         if (ret)

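The point of the refactor is that the shared-fence slot reservation now happens unconditionally; only the wait on other clients' implicit fences is skipped when userspace opts out. From the userspace side, the opt-out is the MSM_SUBMIT_NO_IMPLICIT flag in the submit ioctl; a hypothetical sketch of setting it (the struct and flag come from the msm UAPI header, the helper is illustrative):

#include <stdint.h>
#include "msm_drm.h" /* msm UAPI header; install location varies */

static void example_fill_submit(struct drm_msm_gem_submit *req)
{
        /* skip implicit (reservation object) fencing for this submit;
         * explicit in/out fences must then carry the dependencies */
        req->flags |= MSM_SUBMIT_NO_IMPLICIT;
}
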
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                 msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                 msm_ringbuffer_destroy(gpu->rb);
         }
-        if (gpu->aspace) {
+
+        if (!IS_ERR_OR_NULL(gpu->aspace)) {
                 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
                         NULL, 0);
                 msm_gem_address_space_put(gpu->aspace);

@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 
                 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
 
+                /* Note that smp_load_acquire() is not strictly required
+                 * as CIRC_SPACE_TO_END() does not access the tail more
+                 * than once.
+                 */
                 n = min(sz, circ_space_to_end(&rd->fifo));
                 memcpy(fptr, ptr, n);
 
-                fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+                smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
                 sz  -= n;
                 ptr += n;
 
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
         if (ret)
                 goto out;
 
+        /* Note that smp_load_acquire() is not strictly required
+         * as CIRC_CNT_TO_END() does not access the head more than
+         * once.
+         */
         n = min_t(int, sz, circ_count_to_end(&rd->fifo));
         if (copy_to_user(buf, fptr, n)) {
                 ret = -EFAULT;
                 goto out;
         }
 
-        fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+        smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
         *ppos += n;
 
         wake_up_all(&rd->fifo_event);

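The msm_rd change publishes the ring indices with smp_store_release() so the data written into the FIFO is visible before the index moves, the standard single-producer/single-consumer circular-buffer pattern paired with acquire semantics on the reading side. A generic, self-contained sketch of that pattern (kernel context, smp_store_release()/smp_load_acquire() from <asm/barrier.h>; not the msm code, RING_SZ and the helpers are illustrative):

#define RING_SZ 256 /* power of two, assumed */

struct example_ring {
        char buf[RING_SZ];
        unsigned int head; /* written by the producer only */
        unsigned int tail; /* written by the consumer only */
};

static void example_produce(struct example_ring *r, char c)
{
        unsigned int head = r->head;

        /* fullness check omitted for brevity */
        r->buf[head & (RING_SZ - 1)] = c;
        /* order the data write before making the new head visible */
        smp_store_release(&r->head, head + 1);
}

static int example_consume(struct example_ring *r, char *c)
{
        unsigned int head = smp_load_acquire(&r->head);
        unsigned int tail = r->tail;

        if (head == tail)
                return -1; /* empty */

        *c = r->buf[tail & (RING_SZ - 1)];
        /* order the data read before handing the slot back to the producer */
        smp_store_release(&r->tail, tail + 1);
        return 0;
}
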
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
                 return -EINVAL;
         }
 
+        /*
+         * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+         * i.MX53 channel arbitration locking doesn't seem to work properly.
+         * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+         */
+        if (bursts && ipu->ipu_type != IPUV3H)
+                return -EINVAL;
+
         for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
                 if (channel->num == idmac_lock_en_info[i].chnum)
                         break;

@@ -73,6 +73,14 @@
 #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v)          ((v & 0x7) << 1)
 #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v)     ((v & 0x3) << 4)
 
+#define IPU_PRE_STORE_ENG_STATUS                        0x120
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK     0xffff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT    0
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK     0x3fff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT    16
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL        (1 << 30)
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD            (1 << 31)
+
 #define IPU_PRE_STORE_ENG_SIZE                          0x130
 #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v)           ((v & 0xffff) << 0)
 #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v)          ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
         dma_addr_t              buffer_paddr;
         void                    *buffer_virt;
         bool                    in_use;
+        unsigned int            safe_window_end;
 };
 
 static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
         u32 active_bpp = info->cpp[0] >> 1;
         u32 val;
 
+        /* calculate safe window for ctrl register updates */
+        pre->safe_window_end = height - 2;
+
         writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
         writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
 
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
 
 void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
 {
+        unsigned long timeout = jiffies + msecs_to_jiffies(5);
+        unsigned short current_yblock;
+        u32 val;
+
         writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
 
+        do {
+                if (time_after(jiffies, timeout)) {
+                        dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+                        return;
+                }
+
+                val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+                current_yblock =
+                        (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+                        IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+        } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
         writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
 }
 

@@ -14,6 +14,7 @@
 #include <drm/drm_fourcc.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
         val = IPU_PRG_REG_UPDATE_REG_UPDATE;
         writel(val, prg->regs + IPU_PRG_REG_UPDATE);
 
+        /* wait for both double buffers to be filled */
+        readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+                           (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+                           (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+                           5, 1000);
+
         clk_disable_unprepare(prg->clk_ipg);
 
         chan->enabled = true;

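readl_poll_timeout() comes from the newly included <linux/iopoll.h>: it re-reads the register into val until the condition holds or the timeout (last argument, in microseconds) expires, sleeping between polls, and returns 0 on success or -ETIMEDOUT. Conceptually it behaves roughly like this open-coded sketch (hypothetical helper, simplified timing; needs <linux/io.h>, <linux/jiffies.h>, <linux/delay.h>, <linux/errno.h>):

static int example_wait_buffers_ready(void __iomem *status_reg, u32 mask)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(1000);
        u32 val;

        for (;;) {
                val = readl(status_reg);
                if ((val & mask) == mask) /* e.g. both buffers ready */
                        return 0;
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                usleep_range(5, 10); /* sleep_us between polls */
        }
}
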