msm next, i915, vc4, v3d fixes
-----BEGIN PGP SIGNATURE-----

iQIbBAABAgAGBQJbHhIbAAoJEAx081l5xIa+x+0P9379QM1Y52rKfMYoBQqjVFw2
VwVfq5BjjBW8B5mz5tNlI9UCH1feg8kK4xntHegVKBJXDGepYj+b46g3wlC4T3jj
uMt7lwRk3iVPCA2SZvAmsEUEm6peQzCFR0cIspJndeFoMpuH+9ymjN4d25Qj2QnW
5OUwTOx1nnIlL+fU4Hbk3RO1wByOUNYdG0aovA6jrHotRiZAxf7bqVXTReWF7Rx3
XCh5gTMUYeQLh2UUpAvVV3AimGNMBZ8hivSsGAwBW/c3+yj4OKE7dIx2DCCGbRcK
i6uzbeidr/i5dHkMAV3Midfi60iXaJamFkR5QmsNa8MuMcesrhkNxxdKpH48s9PP
ok+Xm3mE65dOmauffA2RS2MxnAH3T+leclEdgV8GWzow1XMsDahoULmwNA77uIIC
iK1cXocXFZfSFVX0m1SG+vyNJS4FCjOt2jqKDRkHOOAcmoSAWOk0esB67UBgFtIT
foWMDINWMGi3I/hvGhlXpWZZ8KR2VCisattvudcaWYmU8IW9pjUHCbbsAIcBgJbS
T8tvnoBihFDOG3Gk3cr1OjsLT8LYBDva0ARDGCXmWuwNl9RmO/gw2YAb++hWHz+p
oqUGGkSbLTkplJDXwfCrZIISU/mpUx6Xe1ZQHOPriqhN2KWsWxcKW8LyIFsFKDiA
5fKSeDecjYvs5/L/lL4=
=GHrL
-----END PGP SIGNATURE-----

Merge tag 'drm-next-2018-06-11' of git://anongit.freedesktop.org/drm/drm

Pull drm msm updates and misc fixes from Dave Airlie:
 "I looked at Rob's msm tree, he kept it small due to being late, and it
  was in -next for a while before he was ill, so I think it should be
  fine.

  Otherwise this contains a set of i915 fixes and a v3d build fix, and
  vc4 leak fix"

* tag 'drm-next-2018-06-11' of git://anongit.freedesktop.org/drm/drm: (31 commits)
  drm/i915/icl: Don't update enabled dbuf slices struct until updated in hw
  drm/i915/icl: fix icl_unmap/map_plls_to_ports
  drm/i915: Remove bogus NV12 PLANE_COLOR_CTL setup
  drm/msm: Fix NULL deref on bind/probe deferral
  drm/msm: Switch to atomic_helper_commit()
  drm/msm: Remove msm_commit/worker, use atomic helper commit
  drm/msm: Issue queued events when disabling crtc
  drm/msm: Move implicit sync handling to prepare_fb
  drm/msm: Refactor complete_commit() to look more the helpers
  drm/msm: Don't subclass drm_atomic_state anymore
  drm/msm/mdp5: Use the new private_obj state
  drm/msm/mdp5: Add global state as a private atomic object
  drm/msm: use correct aspace pointer in msm_gem_put_iova()
  drm/msm: remove unbalanced mutex unlock
  drm/msm: don't deref error pointer in the msm_fbdev_create error path
  drm/msm/dsi: use correct enum in dsi_get_cmd_fmt
  drm/msm: Fix possible null dereference on failure of get_pages()
  drm/msm: Add modifier to mdp_get_format arguments
  drm/msm: Mark the crtc->state->event consumed
  drm/msm/dsi: implement auto PHY timing calculator for 10nm PHY
  ...
commit 8d08c05542
@@ -2909,6 +2909,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
        if (info) {
            gvt_err("%s %s duplicated\n", e->info->name,
                info->name);
            kfree(e);
            return -EEXIST;
        }
@@ -67,7 +67,7 @@
#define AUX_NATIVE_REPLY_NAK    (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER  (0x2 << 4)

#define AUX_BURST_SIZE          16
#define AUX_BURST_SIZE          20

/* DPCD addresses */
#define DPCD_REV                0x000
@@ -903,11 +903,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
    }

    /*
     * Write request format: (command + address) occupies
     * 3 bytes, followed by (len + 1) bytes of data.
     * Write request format: Headr (command + address + size) occupies
     * 4 bytes, followed by (len + 1) bytes of data. See details at
     * intel_dp_aux_transfer().
     */
    if (WARN_ON((len + 4) > AUX_BURST_SIZE))
    if ((len + 1 + 4) > AUX_BURST_SIZE) {
        gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
        return -EINVAL;
    }

    /* unpack data from vreg to buf */
    for (t = 0; t < 4; t++) {
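As an illustrative aside (not part of the patch): the bound being enforced is that a 4-byte header plus (len + 1) data bytes must fit in the AUX burst buffer, now sized at 20 bytes. A minimal sketch of that check as a standalone helper; the helper name and macros are invented for the example:

    /* Hypothetical helper mirroring the size check above: an AUX write is a
     * 4-byte header (command + address + size) followed by (len + 1) data
     * bytes, and the whole message must fit in the burst buffer.
     */
    #define EX_AUX_HEADER_SIZE 4
    #define EX_AUX_BURST_SIZE  20

    static int aux_write_fits(int len)
    {
        return (len + 1 + EX_AUX_HEADER_SIZE) <= EX_AUX_BURST_SIZE;
    }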
@@ -971,8 +974,10 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
    /*
     * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
     */
    if (WARN_ON((len + 2) > AUX_BURST_SIZE))
    if ((len + 2) > AUX_BURST_SIZE) {
        gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
        return -EINVAL;
    }

    /* read from virtual DPCD to vreg */
    /* first 4 bytes: [ACK][addr][addr+1][addr+2] */
@@ -123,6 +123,12 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
        return -EINVAL;
    }

    if (!pfn_valid(pfn)) {
        gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
        vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
        return -EINVAL;
    }

    /* Setup DMA mapping. */
    page = pfn_to_page(pfn);
    *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
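The added check follows a common kernel rule: never convert a PFN to a struct page unless pfn_valid() says the PFN is backed by memory the kernel manages. A small hedged sketch of that shape, separate from the gvt code (helper name and return-value policy invented for the example):

    /* Hypothetical helper: DMA-map a pinned PFN only when it is RAM backed.
     * Mirrors the guard added above; a PFN pointing at device/MMIO space has
     * no struct page and must be rejected instead of passed to pfn_to_page().
     */
    static int map_ram_backed_pfn(struct device *dev, unsigned long pfn,
                                  dma_addr_t *dma_addr)
    {
        struct page *page;

        if (!pfn_valid(pfn))
            return -EINVAL;        /* no struct page behind this PFN */

        page = pfn_to_page(pfn);
        *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        return dma_mapping_error(dev, *dma_addr) ? -ENOMEM : 0;
    }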
@@ -583,6 +589,17 @@ out:
    return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
    struct eventfd_ctx *trigger;

    trigger = vgpu->vdev.msi_trigger;
    if (trigger) {
        eventfd_ctx_put(trigger);
        vgpu->vdev.msi_trigger = NULL;
    }
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
    struct kvmgt_guest_info *info;
@@ -607,6 +624,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
    info = (struct kvmgt_guest_info *)vgpu->handle;
    kvmgt_guest_exit(info);

    intel_vgpu_release_msi_eventfd_ctx(vgpu);

    vgpu->vdev.kvm = NULL;
    vgpu->handle = 0;
}
@@ -987,7 +1006,8 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
            return PTR_ERR(trigger);
        }
        vgpu->vdev.msi_trigger = trigger;
    }
    } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
        intel_vgpu_release_msi_eventfd_ctx(vgpu);

    return 0;
}
@@ -1592,6 +1612,18 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
    info = (struct kvmgt_guest_info *)handle;
    vgpu = info->vgpu;

    /*
     * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
     * config and mmio register isn't restored to default during guest
     * poweroff. If this vgpu is still used in next vm, this vgpu's pipe
     * may be enabled, then once this vgpu is active, it will get inject
     * vblank interrupt request. But msi_trigger is null until msi is
     * enabled by guest. so if msi_trigger is null, success is still
     * returned and don't inject interrupt into guest.
     */
    if (vgpu->vdev.msi_trigger == NULL)
        return 0;

    if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
        return 0;
@@ -2972,23 +2972,22 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
    struct i915_request *request, *active = NULL;
    unsigned long flags;

    /* We are called by the error capture and reset at a random
     * point in time. In particular, note that neither is crucially
     * ordered with an interrupt. After a hang, the GPU is dead and we
     * assume that no more writes can happen (we waited long enough for
     * all writes that were in transaction to be flushed) - adding an
    /*
     * We are called by the error capture, reset and to dump engine
     * state at random points in time. In particular, note that neither is
     * crucially ordered with an interrupt. After a hang, the GPU is dead
     * and we assume that no more writes can happen (we waited long enough
     * for all writes that were in transaction to be flushed) - adding an
     * extra delay for a recent interrupt is pointless. Hence, we do
     * not need an engine->irq_seqno_barrier() before the seqno reads.
     * At all other times, we must assume the GPU is still running, but
     * we only care about the snapshot of this moment.
     */
    spin_lock_irqsave(&engine->timeline.lock, flags);
    list_for_each_entry(request, &engine->timeline.requests, link) {
        if (__i915_request_completed(request, request->global_seqno))
            continue;

        GEM_BUG_ON(request->engine != engine);
        GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                            &request->fence.flags));

        active = request;
        break;
    }
@@ -2453,12 +2453,13 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
    for_each_new_connector_in_state(old_state, conn, conn_state, i) {
        struct intel_encoder *encoder =
            to_intel_encoder(conn_state->best_encoder);
        enum port port = encoder->port;
        enum port port;
        uint32_t val;

        if (conn_state->crtc != crtc)
            continue;

        port = encoder->port;
        mutex_lock(&dev_priv->dpll_lock);

        val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -2490,11 +2491,12 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
    for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
        struct intel_encoder *encoder =
            to_intel_encoder(old_conn_state->best_encoder);
        enum port port = encoder->port;
        enum port port;

        if (old_conn_state->crtc != crtc)
            continue;

        port = encoder->port;
        mutex_lock(&dev_priv->dpll_lock);
        I915_WRITE(DPCLKA_CFGCR0_ICL,
                   I915_READ(DPCLKA_CFGCR0_ICL) |
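Both hunks move the encoder->port read below the CRTC filter. The failure mode is generic: a connector that is not part of this update may have no encoder attached, so dereferencing it before the early continue can fault. A hedged, self-contained sketch of the before/after shape, with simplified types and invented names:

    struct ex_encoder { int port; };
    struct ex_conn_state { void *crtc; struct ex_encoder *best_encoder; };

    /* Buggy shape: dereferences best_encoder even for connectors that do not
     * belong to this CRTC, where best_encoder may be NULL.
     */
    static int port_of_buggy(struct ex_conn_state *cs, void *crtc)
    {
        int port = cs->best_encoder->port;    /* may fault here */

        if (cs->crtc != crtc)
            return -1;
        return port;
    }

    /* Fixed shape: filter first, only then touch the connector's encoder. */
    static int port_of_fixed(struct ex_conn_state *cs, void *crtc)
    {
        if (cs->crtc != crtc)
            return -1;
        return cs->best_encoder->port;
    }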
@@ -3690,11 +3690,6 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
    plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);

    if (intel_format_is_yuv(fb->format->format)) {
        if (fb->format->format == DRM_FORMAT_NV12) {
            plane_color_ctl |=
                PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
            goto out;
        }
        if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
            plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
        else
@@ -3703,7 +3698,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
        if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
            plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
    }
out:

    return plane_color_ctl;
}
@@ -1679,23 +1679,6 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
    return bpp;
}

static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
                                       struct drm_display_mode *m2)
{
    bool bres = false;

    if (m1 && m2)
        bres = (m1->hdisplay == m2->hdisplay &&
                m1->hsync_start == m2->hsync_start &&
                m1->hsync_end == m2->hsync_end &&
                m1->htotal == m2->htotal &&
                m1->vdisplay == m2->vdisplay &&
                m1->vsync_start == m2->vsync_start &&
                m1->vsync_end == m2->vsync_end &&
                m1->vtotal == m2->vtotal);
    return bres;
}

/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
@@ -1860,16 +1843,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

    if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
        struct drm_display_mode *panel_mode =
            intel_connector->panel.alt_fixed_mode;
        struct drm_display_mode *req_mode = &pipe_config->base.mode;

        if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
            panel_mode = intel_connector->panel.fixed_mode;

        drm_mode_debug_printmodeline(panel_mode);

        intel_fixed_panel_mode(panel_mode, adjusted_mode);
        intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                               adjusted_mode);

        if (INTEL_GEN(dev_priv) >= 9) {
            int ret;
@@ -6159,7 +6134,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct drm_connector *connector = &intel_connector->base;
    struct drm_display_mode *fixed_mode = NULL;
    struct drm_display_mode *alt_fixed_mode = NULL;
    struct drm_display_mode *downclock_mode = NULL;
    bool has_dpcd;
    struct drm_display_mode *scan;
@@ -6214,14 +6188,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
    }
    intel_connector->edid = edid;

    /* prefer fixed mode from EDID if available, save an alt mode also */
    /* prefer fixed mode from EDID if available */
    list_for_each_entry(scan, &connector->probed_modes, head) {
        if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
            fixed_mode = drm_mode_duplicate(dev, scan);
            downclock_mode = intel_dp_drrs_init(
                        intel_connector, fixed_mode);
        } else if (!alt_fixed_mode) {
            alt_fixed_mode = drm_mode_duplicate(dev, scan);
            break;
        }
    }
@@ -6258,8 +6231,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                  pipe_name(pipe));
    }

    intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
                     downclock_mode);
    intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
    intel_connector->panel.backlight.power = intel_edp_backlight_power;
    intel_panel_setup_backlight(connector, pipe);
@@ -277,7 +277,6 @@ struct intel_encoder {

struct intel_panel {
    struct drm_display_mode *fixed_mode;
    struct drm_display_mode *alt_fixed_mode;
    struct drm_display_mode *downclock_mode;

    /* backlight */
@@ -1850,7 +1849,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
                     struct drm_display_mode *fixed_mode,
                     struct drm_display_mode *alt_fixed_mode,
                     struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
@@ -1846,7 +1846,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
    connector->display_info.width_mm = fixed_mode->width_mm;
    connector->display_info.height_mm = fixed_mode->height_mm;

    intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL);
    intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
    intel_panel_setup_backlight(connector, INVALID_PIPE);

    intel_dsi_add_properties(intel_connector);
@@ -536,7 +536,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
         */
        intel_panel_init(&intel_connector->panel,
                         intel_dvo_get_current_mode(intel_encoder),
                         NULL, NULL);
                         NULL);
        intel_dvo->panel_wants_dither = true;
    }
@@ -1114,7 +1114,7 @@ static void print_request(struct drm_printer *m,
                          const char *prefix)
{
    const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
    char buf[80];
    char buf[80] = "";
    int x = 0;

    x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
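The one-character change matters because the buffer can be printed even when nothing was appended to it, and an uninitialized stack array passed to a %s format reads indeterminate bytes. A hedged, standalone sketch of that failure mode in plain C (the appender is invented for the example):

    #include <stdio.h>

    /* Hypothetical appender: writes into buf only when it has something to
     * say, otherwise leaves the buffer untouched and returns the offset.
     */
    static int append_attr(char *buf, int x, unsigned long len, int prio)
    {
        if (prio == 0)
            return x;                /* nothing appended */
        return x + snprintf(buf + x, len - x, " prio=%d", prio);
    }

    int main(void)
    {
        char init[80] = "";          /* new code: always a valid empty string */

        append_attr(init, 0, sizeof(init), 0);
        printf("%s\n", init);        /* safe: prints an empty line */
        /* With "char buf[80];" (old code) the same printf would read
         * whatever happened to be on the stack. */
        return 0;
    }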
@@ -1175,8 +1175,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
out:
    mutex_unlock(&dev->mode_config.mutex);

    intel_panel_init(&intel_connector->panel, fixed_mode, NULL,
                     downclock_mode);
    intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
    intel_panel_setup_backlight(connector, INVALID_PIPE);

    lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
@@ -1928,13 +1928,11 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)

int intel_panel_init(struct intel_panel *panel,
                     struct drm_display_mode *fixed_mode,
                     struct drm_display_mode *alt_fixed_mode,
                     struct drm_display_mode *downclock_mode)
{
    intel_panel_init_backlight_funcs(panel);

    panel->fixed_mode = fixed_mode;
    panel->alt_fixed_mode = alt_fixed_mode;
    panel->downclock_mode = downclock_mode;

    return 0;
@@ -1948,10 +1946,6 @@ void intel_panel_fini(struct intel_panel *panel)
    if (panel->fixed_mode)
        drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);

    if (panel->alt_fixed_mode)
        drm_mode_destroy(intel_connector->base.dev,
                         panel->alt_fixed_mode);

    if (panel->downclock_mode)
        drm_mode_destroy(intel_connector->base.dev,
                         panel->downclock_mode);
@@ -5150,7 +5150,6 @@ skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
           sizeof(dst->ddb.uv_plane[pipe]));
    memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
           sizeof(dst->ddb.plane[pipe]));
    dst->ddb.enabled_slices = src->ddb.enabled_slices;
}

static void
@@ -98,21 +98,6 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
    .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int mdp4_plane_prepare_fb(struct drm_plane *plane,
                                 struct drm_plane_state *new_state)
{
    struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
    struct mdp4_kms *mdp4_kms = get_kms(plane);
    struct msm_kms *kms = &mdp4_kms->base.base;
    struct drm_framebuffer *fb = new_state->fb;

    if (!fb)
        return 0;

    DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
    return msm_framebuffer_prepare(fb, kms->aspace);
}

static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
{

@@ -152,7 +137,7 @@ static void mdp4_plane_atomic_update(struct drm_plane *plane,
}

static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
    .prepare_fb = mdp4_plane_prepare_fb,
    .prepare_fb = msm_atomic_prepare_fb,
    .cleanup_fb = mdp4_plane_cleanup_fb,
    .atomic_check = mdp4_plane_atomic_check,
    .atomic_update = mdp4_plane_atomic_update,
@@ -430,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
    struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
    struct mdp5_kms *mdp5_kms = get_kms(crtc);
    struct device *dev = &mdp5_kms->pdev->dev;
    unsigned long flags;

    DBG("%s", crtc->name);

@@ -445,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
    mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
    pm_runtime_put_sync(dev);

    if (crtc->state->event && !crtc->state->active) {
        WARN_ON(mdp5_crtc->event);
        spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
        drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;
        spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
    }

    mdp5_crtc->enabled = false;
}
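This hunk implements the "Issue queued events when disabling crtc" / "Mark the crtc->state->event consumed" items from the shortlog: any event still attached to crtc->state must be completed even when the CRTC is turning off, otherwise userspace waits for a vblank that never comes. A hedged generic sketch of that pattern; drm_crtc_send_vblank_event() and dev->event_lock are the DRM interfaces used above, the wrapper function is invented:

    /* Hypothetical helper: complete a pending pageflip/modeset event at
     * disable time so it is not left dangling on an inactive CRTC.
     */
    static void flush_pending_event(struct drm_crtc *crtc)
    {
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        if (!crtc->state->event || crtc->state->active)
            return;

        spin_lock_irqsave(&dev->event_lock, flags);
        drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;    /* mark the event consumed */
        spin_unlock_irqrestore(&dev->event_lock, flags);
    }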
@@ -70,60 +70,110 @@ static int mdp5_hw_init(struct msm_kms *kms)
    return 0;
}

struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
    return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
    struct msm_drm_private *priv = s->dev->dev_private;
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
    struct msm_kms_state *state = to_kms_state(s);
    struct mdp5_state *new_state;
    struct drm_private_state *priv_state;
    int ret;

    if (state->state)
        return state->state;

    ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
    ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
    if (ret)
        return ERR_PTR(ret);

    new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
    if (!new_state)
        return ERR_PTR(-ENOMEM);
    priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
    if (IS_ERR(priv_state))
        return ERR_CAST(priv_state);

    /* Copy state: */
    new_state->hwpipe = mdp5_kms->state->hwpipe;
    new_state->hwmixer = mdp5_kms->state->hwmixer;
    if (mdp5_kms->smp)
        new_state->smp = mdp5_kms->state->smp;

    state->state = new_state;

    return new_state;
    return to_mdp5_global_state(priv_state);
}

static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
    swap(to_kms_state(state)->state, mdp5_kms->state);
    struct mdp5_global_state *state;

    state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
    if (!state)
        return NULL;

    __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

    return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
                                      struct drm_private_state *state)
{
    struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

    kfree(mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
    .atomic_duplicate_state = mdp5_global_duplicate_state,
    .atomic_destroy_state = mdp5_global_destroy_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
    struct mdp5_global_state *state;

    drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

    state = kzalloc(sizeof(*state), GFP_KERNEL);
    if (!state)
        return -ENOMEM;

    state->mdp5_kms = mdp5_kms;

    drm_atomic_private_obj_init(&mdp5_kms->glob_state,
                                &state->base,
                                &mdp5_global_state_funcs);
    return 0;
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
    struct device *dev = &mdp5_kms->pdev->dev;
    struct mdp5_global_state *global_state;

    global_state = mdp5_get_existing_global_state(mdp5_kms);

    pm_runtime_get_sync(dev);

    if (mdp5_kms->smp)
        mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
        mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
    struct device *dev = &mdp5_kms->pdev->dev;
    struct mdp5_global_state *global_state;

    global_state = mdp5_get_existing_global_state(mdp5_kms);

    if (mdp5_kms->smp)
        mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
        mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);

    pm_runtime_put_sync(dev);
}

@@ -229,7 +279,6 @@ static const struct mdp_kms_funcs kms_funcs = {
    .irq = mdp5_irq,
    .enable_vblank = mdp5_enable_vblank,
    .disable_vblank = mdp5_disable_vblank,
    .swap_state = mdp5_swap_state,
    .prepare_commit = mdp5_prepare_commit,
    .complete_commit = mdp5_complete_commit,
    .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
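The mdp5 changes above follow the generic drm_private_obj pattern for driver-global atomic state, replacing the hand-rolled swap_state mechanism. A condensed, hedged sketch of the same pattern outside mdp5; the struct, field, and helper names are invented for illustration, while the drm_atomic_* calls are the ones used in the diff:

    /* Driver-global state embedded in a drm_private_state. */
    struct foo_global_state {
        struct drm_private_state base;
        int shared_resource_count;    /* example shared resource */
    };

    #define to_foo_global_state(x) container_of(x, struct foo_global_state, base)

    static struct drm_private_state *
    foo_global_duplicate_state(struct drm_private_obj *obj)
    {
        struct foo_global_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
            return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
        return &state->base;
    }

    static void foo_global_destroy_state(struct drm_private_obj *obj,
                                         struct drm_private_state *state)
    {
        kfree(to_foo_global_state(state));
    }

    static const struct drm_private_state_funcs foo_global_state_funcs = {
        .atomic_duplicate_state = foo_global_duplicate_state,
        .atomic_destroy_state = foo_global_destroy_state,
    };

    /* At init time (API as used by this kernel version):
     *   drm_atomic_private_obj_init(&foo->obj, &state->base,
     *                               &foo_global_state_funcs);
     * In atomic check/commit, fetch the per-update copy: */
    static struct foo_global_state *
    foo_get_global_state(struct drm_atomic_state *s, struct drm_private_obj *obj)
    {
        struct drm_private_state *priv_state;

        priv_state = drm_atomic_get_private_obj_state(s, obj);
        if (IS_ERR(priv_state))
            return ERR_CAST(priv_state);

        return to_foo_global_state(priv_state);
    }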
@@ -727,7 +776,8 @@ static void mdp5_destroy(struct platform_device *pdev)
    if (mdp5_kms->rpm_enabled)
        pm_runtime_disable(&pdev->dev);

    kfree(mdp5_kms->state);
    drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
    drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}

static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -880,12 +930,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
    mdp5_kms->dev = dev;
    mdp5_kms->pdev = pdev;

    drm_modeset_lock_init(&mdp5_kms->state_lock);
    mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
    if (!mdp5_kms->state) {
        ret = -ENOMEM;
    ret = mdp5_global_obj_init(mdp5_kms);
    if (ret)
        goto fail;
    }

    mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
    if (IS_ERR(mdp5_kms->mmio)) {
@@ -28,8 +28,6 @@
#include "mdp5_ctl.h"
#include "mdp5_smp.h"

struct mdp5_state;

struct mdp5_kms {
    struct mdp_kms base;
@@ -49,11 +47,12 @@ struct mdp5_kms {
    struct mdp5_cfg_handler *cfg;
    uint32_t caps;    /* MDP capabilities (MDP_CAP_XXX bits) */

    /**
     * Global atomic state. Do not access directly, use mdp5_get_state()
    /*
     * Global private object state, Do not access directly, use
     * mdp5_global_get_state()
     */
    struct mdp5_state *state;
    struct drm_modeset_lock state_lock;
    struct drm_modeset_lock glob_state_lock;
    struct drm_private_obj glob_state;

    struct mdp5_smp *smp;
    struct mdp5_ctl_manager *ctlm;
@@ -81,19 +80,23 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)

/* Global atomic state for tracking resources that are shared across
/* Global private object state for tracking resources that are shared across
 * multiple kms objects (planes/crtcs/etc).
 *
 * For atomic updates which require modifying global state,
 */
struct mdp5_state {
#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
struct mdp5_global_state {
    struct drm_private_state base;

    struct drm_atomic_state *state;
    struct mdp5_kms *mdp5_kms;

    struct mdp5_hw_pipe_state hwpipe;
    struct mdp5_hw_mixer_state hwmixer;
    struct mdp5_smp_state smp;
};

struct mdp5_state *__must_check
mdp5_get_state(struct drm_atomic_state *s);
struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);

/* Atomic plane state. Subclasses the base drm_plane_state in order to
 * track assigned hwpipe and hw specific state.
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
{
    struct msm_drm_private *priv = s->dev->dev_private;
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
    struct mdp5_state *state = mdp5_get_state(s);
    struct mdp5_global_state *global_state = mdp5_get_global_state(s);
    struct mdp5_hw_mixer_state *new_state;
    int i;

    if (IS_ERR(state))
        return PTR_ERR(state);
    if (IS_ERR(global_state))
        return PTR_ERR(global_state);

    new_state = &state->hwmixer;
    new_state = &global_state->hwmixer;

    for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
        struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,

void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
    struct mdp5_state *state = mdp5_get_state(s);
    struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
    struct mdp5_global_state *global_state = mdp5_get_global_state(s);
    struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;

    if (!mixer)
        return;
@@ -24,17 +24,19 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
{
    struct msm_drm_private *priv = s->dev->dev_private;
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
    struct mdp5_state *state;
    struct mdp5_global_state *new_global_state, *old_global_state;
    struct mdp5_hw_pipe_state *old_state, *new_state;
    int i, j;

    state = mdp5_get_state(s);
    if (IS_ERR(state))
        return PTR_ERR(state);
    new_global_state = mdp5_get_global_state(s);
    if (IS_ERR(new_global_state))
        return PTR_ERR(new_global_state);

    /* grab old_state after mdp5_get_state(), since now we hold lock: */
    old_state = &mdp5_kms->state->hwpipe;
    new_state = &state->hwpipe;
    /* grab old_state after mdp5_get_global_state(), since now we hold lock: */
    old_global_state = mdp5_get_existing_global_state(mdp5_kms);

    old_state = &old_global_state->hwpipe;
    new_state = &new_global_state->hwpipe;

    for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
        struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
@@ -107,7 +109,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
        WARN_ON(r_hwpipe);

        DBG("%s: alloc SMP blocks", (*hwpipe)->name);
        ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
        ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
                              (*hwpipe)->pipe, blkcfg);
        if (ret)
            return -ENOMEM;
@@ -132,7 +134,7 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
    struct msm_drm_private *priv = s->dev->dev_private;
    struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
    struct mdp5_state *state = mdp5_get_state(s);
    struct mdp5_global_state *state = mdp5_get_global_state(s);
    struct mdp5_hw_pipe_state *new_state = &state->hwpipe;

    if (!hwpipe)
@@ -245,20 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
    .atomic_print_state = mdp5_plane_atomic_print_state,
};

static int mdp5_plane_prepare_fb(struct drm_plane *plane,
                                 struct drm_plane_state *new_state)
{
    struct mdp5_kms *mdp5_kms = get_kms(plane);
    struct msm_kms *kms = &mdp5_kms->base.base;
    struct drm_framebuffer *fb = new_state->fb;

    if (!new_state->fb)
        return 0;

    DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
    return msm_framebuffer_prepare(fb, kms->aspace);
}

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
{
@@ -543,7 +529,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
}

static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
    .prepare_fb = mdp5_plane_prepare_fb,
    .prepare_fb = msm_atomic_prepare_fb,
    .cleanup_fb = mdp5_plane_cleanup_fb,
    .atomic_check = mdp5_plane_atomic_check,
    .atomic_update = mdp5_plane_atomic_update,
@@ -340,17 +340,20 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
    struct mdp5_kms *mdp5_kms = get_kms(smp);
    struct mdp5_hw_pipe_state *hwpstate;
    struct mdp5_smp_state *state;
    struct mdp5_global_state *global_state;
    int total = 0, i, j;

    drm_printf(p, "name\tinuse\tplane\n");
    drm_printf(p, "----\t-----\t-----\n");

    if (drm_can_sleep())
        drm_modeset_lock(&mdp5_kms->state_lock, NULL);
        drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);

    global_state = mdp5_get_existing_global_state(mdp5_kms);

    /* grab these *after* we hold the state_lock */
    hwpstate = &mdp5_kms->state->hwpipe;
    state = &mdp5_kms->state->smp;
    hwpstate = &global_state->hwpipe;
    state = &global_state->smp;

    for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
        struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
@@ -374,7 +377,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
           bitmap_weight(state->state, smp->blk_cnt));

    if (drm_can_sleep())
        drm_modeset_unlock(&mdp5_kms->state_lock);
        drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -384,7 +387,8 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)

struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
    struct mdp5_smp_state *state = &mdp5_kms->state->smp;
    struct mdp5_smp_state *state;
    struct mdp5_global_state *global_state;
    struct mdp5_smp *smp = NULL;
    int ret;
@@ -398,6 +402,9 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
    smp->blk_cnt = cfg->mmb_count;
    smp->blk_size = cfg->mmb_size;

    global_state = mdp5_get_existing_global_state(mdp5_kms);
    state = &global_state->smp;

    /* statically tied MMBs cannot be re-allocated: */
    bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
    memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
@@ -1036,7 +1036,6 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)

    ret = msm_gem_get_iova(msm_host->tx_gem_obj,
                           priv->kms->aspace, &iova);
    mutex_unlock(&dev->struct_mutex);
    if (ret) {
        pr_err("%s: failed to get iova, %d\n", __func__, ret);
        return ret;
@@ -1067,9 +1066,20 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
    struct drm_device *dev = msm_host->dev;
    struct msm_drm_private *priv;

    /*
     * This is possible if we're tearing down before we've had a chance to
     * fully initialize. A very real possibility if our probe is deferred,
     * in which case we'll hit msm_dsi_host_destroy() without having run
     * through the dsi_tx_buf_alloc().
     */
    if (!dev)
        return;

    priv = dev->dev_private;
    if (msm_host->tx_gem_obj) {
        msm_gem_put_iova(msm_host->tx_gem_obj, 0);
        msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
        drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
        msm_host->tx_gem_obj = NULL;
    }
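This is the "Fix NULL deref on bind/probe deferral" change from the shortlog, and the guard illustrates a general teardown rule: a destroy path that can run after a deferred probe must tolerate fields that were never initialized. A small hedged sketch of that shape in plain C, with invented names:

    /* Hypothetical teardown mirroring the dsi_tx_buf_free() guard: bail out
     * early when init never ran, and release only what was actually set up.
     */
    struct ex_host {
        void *dev;        /* set only once the device is fully bound */
        void *tx_buf;     /* allocated lazily in the alloc path */
    };

    static void ex_host_tx_buf_free(struct ex_host *h)
    {
        if (!h->dev)
            return;       /* probe was deferred; nothing to undo */

        if (h->tx_buf) {
            kfree(h->tx_buf);
            h->tx_buf = NULL;
        }
    }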
@@ -16,69 +16,8 @@
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"

struct msm_commit {
    struct drm_device *dev;
    struct drm_atomic_state *state;
    struct work_struct work;
    uint32_t crtc_mask;
};

static void commit_worker(struct work_struct *work);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
    int ret;

    spin_lock(&priv->pending_crtcs_event.lock);
    ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
            !(priv->pending_crtcs & crtc_mask));
    if (ret == 0) {
        DBG("start: %08x", crtc_mask);
        priv->pending_crtcs |= crtc_mask;
    }
    spin_unlock(&priv->pending_crtcs_event.lock);

    return ret;
}

/* clear specified crtcs (no longer pending update)
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
    spin_lock(&priv->pending_crtcs_event.lock);
    DBG("end: %08x", crtc_mask);
    priv->pending_crtcs &= ~crtc_mask;
    wake_up_all_locked(&priv->pending_crtcs_event);
    spin_unlock(&priv->pending_crtcs_event.lock);
}

static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
    struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

    if (!c)
        return NULL;

    c->dev = state->dev;
    c->state = state;

    INIT_WORK(&c->work, commit_worker);

    return c;
}

static void commit_destroy(struct msm_commit *c)
{
    end_atomic(c->dev->dev_private, c->crtc_mask);
    kfree(c);
}
#include "msm_kms.h"

static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
        struct drm_atomic_state *old_state)
@@ -97,18 +36,33 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
        }
}

/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
int msm_atomic_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *new_state)
{
    struct msm_drm_private *priv = plane->dev->dev_private;
    struct msm_kms *kms = priv->kms;
    struct drm_gem_object *obj;
    struct msm_gem_object *msm_obj;
    struct dma_fence *fence;

    if (!new_state->fb)
        return 0;

    obj = msm_framebuffer_bo(new_state->fb, 0);
    msm_obj = to_msm_bo(obj);
    fence = reservation_object_get_excl_rcu(msm_obj->resv);

    drm_atomic_set_fence_for_plane(new_state, fence);

    return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
    struct drm_atomic_state *state = c->state;
    struct drm_device *dev = state->dev;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    drm_atomic_helper_wait_for_fences(dev, state, false);

    kms->funcs->prepare_commit(kms, state);

    drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -117,175 +71,13 @@ static void complete_commit(struct msm_commit *c, bool async)

    drm_atomic_helper_commit_modeset_enables(dev, state);

    /* NOTE: _wait_for_vblanks() only waits for vblank on
     * enabled CRTCs. So we end up faulting when disabling
     * due to (potentially) unref'ing the outgoing fb's
     * before the vblank when the disable has latched.
     *
     * But if it did wait on disabled (or newly disabled)
     * CRTCs, that would be racy (ie. we could have missed
     * the irq. We need some way to poll for pipe shut
     * down. Or just live with occasionally hitting the
     * timeout in the CRTC disable path (which really should
     * not be critical path)
     */

    msm_atomic_wait_for_commit_done(dev, state);

    drm_atomic_helper_cleanup_planes(dev, state);

    kms->funcs->complete_commit(kms, state);

    drm_atomic_state_put(state);
    drm_atomic_helper_wait_for_vblanks(dev, state);

    commit_destroy(c);
}
    drm_atomic_helper_commit_hw_done(state);

static void commit_worker(struct work_struct *work)
{
    complete_commit(container_of(work, struct msm_commit, work), true);
}

/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
        struct drm_atomic_state *state, bool nonblock)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_commit *c;
    struct drm_crtc *crtc;
    struct drm_crtc_state *crtc_state;
    struct drm_plane *plane;
    struct drm_plane_state *old_plane_state, *new_plane_state;
    int i, ret;

    ret = drm_atomic_helper_prepare_planes(dev, state);
    if (ret)
        return ret;

    /*
     * Note that plane->atomic_async_check() should fail if we need
     * to re-assign hwpipe or anything that touches global atomic
     * state, so we'll never go down the async update path in those
     * cases.
     */
    if (state->async_update) {
        drm_atomic_helper_async_commit(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        return 0;
    }

    c = commit_init(state);
    if (!c) {
        ret = -ENOMEM;
        goto error;
    }

    /*
     * Figure out what crtcs we have:
     */
    for_each_new_crtc_in_state(state, crtc, crtc_state, i)
        c->crtc_mask |= drm_crtc_mask(crtc);

    /*
     * Figure out what fence to wait for:
     */
    for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
        if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
            struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);

            drm_atomic_set_fence_for_plane(new_plane_state, fence);
        }
    }

    /*
     * Wait for pending updates on any of the same crtc's and then
     * mark our set of crtc's as busy:
     */
    ret = start_atomic(dev->dev_private, c->crtc_mask);
    if (ret)
        goto err_free;

    BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

    /*
     * This is the point of no return - everything below never fails except
     * when the hw goes bonghits. Which means we can commit the new state on
     * the software side now.
     *
     * swap driver private state while still holding state_lock
     */
    if (to_kms_state(state)->state)
        priv->kms->funcs->swap_state(priv->kms, state);

    /*
     * Everything below can be run asynchronously without the need to grab
     * any modeset locks at all under one conditions: It must be guaranteed
     * that the asynchronous work has either been cancelled (if the driver
     * supports it, which at least requires that the framebuffers get
     * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
     * before the new state gets committed on the software side with
     * drm_atomic_helper_swap_state().
     *
     * This scheme allows new atomic state updates to be prepared and
     * checked in parallel to the asynchronous completion of the previous
     * update. Which is important since compositors need to figure out the
     * composition of the next frame right after having submitted the
     * current layout.
     */

    drm_atomic_state_get(state);
    if (nonblock) {
        queue_work(priv->atomic_wq, &c->work);
        return 0;
    }

    complete_commit(c, false);

    return 0;

err_free:
    kfree(c);
error:
    drm_atomic_helper_cleanup_planes(dev, state);
    return ret;
}

struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
{
    struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

    if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
        kfree(state);
        return NULL;
    }

    return &state->base;
}

void msm_atomic_state_clear(struct drm_atomic_state *s)
{
    struct msm_kms_state *state = to_kms_state(s);
    drm_atomic_state_default_clear(&state->base);
    kfree(state->state);
    state->state = NULL;
}

void msm_atomic_state_free(struct drm_atomic_state *state)
{
    kfree(to_kms_state(state)->state);
    drm_atomic_state_default_release(state);
    kfree(state);
}
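After this refactor msm relies on the generic drm_atomic_helper_commit() and only supplies a commit_tail, so the custom worker, crtc-pending bookkeeping, and drm_atomic_state subclass all go away. A hedged skeleton of the tail ordering a driver in this position typically uses (function name invented; only standard atomic helpers are called, in the same order the default helper documents):

    /* Hypothetical commit_tail skeleton: fences first, then modeset
     * disables, planes, modeset enables, then signal hw_done before waiting
     * for vblanks and cleaning up plane state.
     */
    static void example_commit_tail(struct drm_atomic_state *state)
    {
        struct drm_device *dev = state->dev;

        drm_atomic_helper_wait_for_fences(dev, state, false);

        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
    }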
@@ -41,10 +41,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
    .fb_create = msm_framebuffer_create,
    .output_poll_changed = drm_fb_helper_output_poll_changed,
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = msm_atomic_commit,
    .atomic_state_alloc = msm_atomic_state_alloc,
    .atomic_state_clear = msm_atomic_state_clear,
    .atomic_state_free = msm_atomic_state_free,
    .atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
    .atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -384,7 +385,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)

    priv->wq = alloc_ordered_workqueue("msm", 0);
    priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
    init_waitqueue_head(&priv->pending_crtcs_event);

    INIT_LIST_HEAD(&priv->inactive_list);
    INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
@@ -442,6 +442,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
    }

    ddev->mode_config.funcs = &mode_config_funcs;
    ddev->mode_config.helper_private = &mode_config_helper_funcs;

    ret = drm_vblank_init(ddev, priv->num_crtcs);
    if (ret < 0) {
@@ -117,10 +117,6 @@ struct msm_drm_private {
    struct workqueue_struct *wq;
    struct workqueue_struct *atomic_wq;

    /* crtcs pending async atomic updates: */
    uint32_t pending_crtcs;
    wait_queue_head_t pending_crtcs_event;

    unsigned int num_planes;
    struct drm_plane *planes[16];
@@ -160,8 +156,9 @@ struct msm_format {
    uint32_t pixel_format;
};

int msm_atomic_commit(struct drm_device *dev,
        struct drm_atomic_state *state, bool nonblock);
int msm_atomic_prepare_fb(struct drm_plane *plane,
                          struct drm_plane_state *new_state);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
@@ -40,8 +40,6 @@ struct msm_kms_funcs {
    irqreturn_t (*irq)(struct msm_kms *kms);
    int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
    void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
    /* swap global atomic state: */
    void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
    /* modeset, bracketing atomic_commit(): */
    void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
    void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -80,18 +78,6 @@ struct msm_kms {
    struct msm_gem_address_space *aspace;
};

/**
 * Subclass of drm_atomic_state, to allow kms backend to have driver
 * private global state. The kms backend can do whatever it wants
 * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
 * is kfree'd and set back to NULL.
 */
struct msm_kms_state {
    struct drm_atomic_state base;
    void *state;
};
#define to_kms_state(x) container_of(x, struct msm_kms_state, base)

static inline void msm_kms_init(struct msm_kms *kms,
        const struct msm_kms_funcs *funcs)
{
@@ -3,6 +3,7 @@ config DRM_V3D
    depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
    depends on DRM
    depends on COMMON_CLK
    depends on MMU
    select DRM_SCHED
    help
      Choose this option if you have a system that has a Broadcom