drm/msm: move irq utils to mdp_kms

We'll want basically the same thing for mdp5, so refactor it out so it
can be shared.

Signed-off-by: Rob Clark <robdclark@gmail.com>
commit 9e0efa6356
parent dd2da6e346
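The shape of the refactor is the usual embedded-base-struct pattern: struct mdp4_kms embeds struct mdp_kms (which embeds struct msm_kms), the shared irq bookkeeping (irq_list, vblank_mask, in_irq) moves to common mdp code, and the only hardware-specific step, writing the interrupt-enable register, is reached through the new mdp_kms_funcs set_irqmask hook. Below is a minimal stand-alone sketch of that pattern; it is illustrative only, and the type and function names in it are invented for the example rather than being the kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* container_of() as used throughout the kernel: recover the outer
 * struct from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_kms;

struct base_kms_funcs {
	/* hardware-specific hook, analogous to mdp_kms_funcs::set_irqmask */
	void (*set_irqmask)(struct base_kms *kms, uint32_t irqmask);
};

/* plays the role of mdp_kms: generic state plus a vtable */
struct base_kms {
	const struct base_kms_funcs *funcs;
	uint32_t vblank_mask;
};

/* plays the role of mdp4_kms: embeds the base struct as a member */
struct hw4_kms {
	struct base_kms base;
	uint32_t intr_enable;   /* stand-in for the INTR_ENABLE register */
};
#define to_hw4_kms(x) container_of(x, struct hw4_kms, base)

/* hardware-specific implementation, analogous to mdp4_set_irqmask() */
static void hw4_set_irqmask(struct base_kms *kms, uint32_t irqmask)
{
	struct hw4_kms *hw4 = to_hw4_kms(kms);
	hw4->intr_enable = irqmask;   /* would be a register write */
}

static const struct base_kms_funcs hw4_funcs = {
	.set_irqmask = hw4_set_irqmask,
};

/* shared helper, analogous to mdp_update_vblank_mask(): generic
 * bookkeeping, then one indirect call to touch the hardware
 * (locking omitted to keep the sketch short) */
static void base_update_vblank_mask(struct base_kms *kms, uint32_t mask,
		bool enable)
{
	if (enable)
		kms->vblank_mask |= mask;
	else
		kms->vblank_mask &= ~mask;
	kms->funcs->set_irqmask(kms, kms->vblank_mask);
}

int main(void)
{
	struct hw4_kms hw4 = { .base = { .funcs = &hw4_funcs } };

	base_update_vblank_mask(&hw4.base, 1u << 3, true);
	printf("INTR_ENABLE = 0x%08x\n", (unsigned)hw4.intr_enable);
	return 0;
}

With that split, mdp5 can reuse the list handling (mdp_irq_register(), mdp_dispatch_irqs(), mdp_irq_wait()) as-is and only has to supply its own set_irqmask() implementation.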
Makefile
@@ -13,6 +13,7 @@ msm-y := \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
 	mdp/mdp_format.o \
+	mdp/mdp_kms.o \
 	mdp/mdp4/mdp4_crtc.o \
 	mdp/mdp4/mdp4_dtv_encoder.o \
 	mdp/mdp4/mdp4_irq.o \
mdp/mdp4/mdp4_crtc.c
@@ -66,15 +66,15 @@ struct mdp4_crtc {
 	/* for unref'ing cursor bo's after scanout completes: */
 	struct drm_flip_work unref_cursor_work;
 
-	struct mdp4_irq vblank;
-	struct mdp4_irq err;
+	struct mdp_irq vblank;
+	struct mdp_irq err;
 };
 #define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
 
 static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv = crtc->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static void update_fb(struct drm_crtc *crtc, bool async,
@@ -93,7 +93,7 @@ static void update_fb(struct drm_crtc *crtc, bool async,
 
 	if (!async) {
 		/* enable vblank to pick up the old_fb */
-		mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+		mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 	}
 }
 
@@ -145,7 +145,7 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 
 	atomic_or(pending, &mdp4_crtc->pending);
-	mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 }
 
 static void pageflip_cb(struct msm_fence_cb *cb)
@@ -210,9 +210,9 @@ static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
 	if (enabled != mdp4_crtc->enabled) {
 		if (enabled) {
 			mdp4_enable(mdp4_kms);
-			mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
 		} else {
-			mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
 			mdp4_disable(mdp4_kms);
 		}
 		mdp4_crtc->enabled = enabled;
@@ -571,14 +571,14 @@ static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
 	.load_lut = mdp4_crtc_load_lut,
 };
 
-static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
 	struct msm_drm_private *priv = crtc->dev->dev_private;
 	unsigned pending;
 
-	mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
 
 	pending = atomic_xchg(&mdp4_crtc->pending, 0);
 
@@ -593,7 +593,7 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
 	}
 }
 
-static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
mdp/mdp4/mdp4_dtv_encoder.c
@@ -35,7 +35,7 @@ struct mdp4_dtv_encoder {
 static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
 {
 	struct msm_drm_private *priv = encoder->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -137,7 +137,7 @@ static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
 		 * the settings changes for the new modeset (like new
 		 * scanout buffer) don't latch properly..
 		 */
-		mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+		mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
 
 		clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
 		clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
mdp/mdp4/mdp4_irq.c
@@ -19,77 +19,49 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-
-struct mdp4_irq_wait {
-	struct mdp4_irq irq;
-	int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static DEFINE_SPINLOCK(list_lock);
-
-static void update_irq(struct mdp4_kms *mdp4_kms)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
 {
-	struct mdp4_irq *irq;
-	uint32_t irqmask = mdp4_kms->vblank_mask;
-
-	BUG_ON(!spin_is_locked(&list_lock));
-
-	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
-		irqmask |= irq->irqmask;
-
-	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
-static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&list_lock, flags);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	DRM_ERROR("errors: %08x\n", irqstatus);
 }
 
 void mdp4_irq_preinstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
 }
 
 int mdp4_irq_postinstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
-
-	INIT_LIST_HEAD(&mdp4_kms->irq_list);
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+	struct mdp_irq *error_handler = &mdp4_kms->error_handler;
 
 	error_handler->irq = mdp4_irq_error_handler;
 	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
 			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
 
-	mdp4_irq_register(mdp4_kms, error_handler);
+	mdp_irq_register(mdp_kms, error_handler);
 
 	return 0;
 }
 
 void mdp4_irq_uninstall(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
 }
 
 irqreturn_t mdp4_irq(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
-	struct mdp4_irq *handler, *n;
-	unsigned long flags;
 	unsigned int id;
 	uint32_t status;
 
@@ -102,102 +74,20 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 	if (status & mdp4_crtc_vblank(priv->crtcs[id]))
 		drm_handle_vblank(dev, id);
 
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->in_irq = true;
-	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
-		if (handler->irqmask & status) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			handler->irq(handler, handler->irqmask & status);
-			spin_lock_irqsave(&list_lock, flags);
-		}
-	}
-	mdp4_kms->in_irq = false;
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
+	mdp_dispatch_irqs(mdp_kms, status);
 
 	return IRQ_HANDLED;
 }
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), true);
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
-	update_irq(mdp4_kms);
-	spin_unlock_irqrestore(&list_lock, flags);
-}
-
-static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
-{
-	struct mdp4_irq_wait *wait =
-			container_of(irq, struct mdp4_irq_wait, irq);
-	wait->count--;
-	wake_up_all(&wait_event);
-}
-
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
-{
-	struct mdp4_irq_wait wait = {
-		.irq = {
-			.irq = wait_irq,
-			.irqmask = irqmask,
-		},
-		.count = 1,
-	};
-	mdp4_irq_register(mdp4_kms, &wait.irq);
-	wait_event(wait_event, (wait.count <= 0));
-	mdp4_irq_unregister(mdp4_kms, &wait.irq);
-}
-
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (!irq->registered) {
-		irq->registered = true;
-		list_add(&irq->node, &mdp4_kms->irq_list);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
-}
-
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
-{
-	unsigned long flags;
-	bool needs_update = false;
-
-	spin_lock_irqsave(&list_lock, flags);
-
-	if (irq->registered) {
-		irq->registered = false;
-		list_del(&irq->node);
-		needs_update = !mdp4_kms->in_irq;
-	}
-
-	spin_unlock_irqrestore(&list_lock, flags);
-
-	if (needs_update)
-		update_irq_unlocked(mdp4_kms);
+	mdp_update_vblank_mask(to_mdp_kms(kms),
+			mdp4_crtc_vblank(crtc), false);
 }
mdp/mdp4/mdp4_kms.c
@@ -24,7 +24,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
 
 static int mdp4_hw_init(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct drm_device *dev = mdp4_kms->dev;
 	uint32_t version, major, minor, dmap_cfg, vg_cfg;
 	unsigned long clk;
@@ -133,7 +133,7 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
 
 static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
 	unsigned i;
 
@@ -143,11 +143,12 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
-	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	kfree(mdp4_kms);
 }
 
-static const struct msm_kms_funcs kms_funcs = {
+static const struct mdp_kms_funcs kms_funcs = {
+	.base = {
 		.hw_init = mdp4_hw_init,
 		.irq_preinstall = mdp4_irq_preinstall,
 		.irq_postinstall = mdp4_irq_postinstall,
@@ -159,6 +160,8 @@ static const struct msm_kms_funcs kms_funcs = {
 		.round_pixclk = mdp4_round_pixclk,
 		.preclose = mdp4_preclose,
 		.destroy = mdp4_destroy,
+	},
+	.set_irqmask = mdp4_set_irqmask,
 };
 
 int mdp4_disable(struct mdp4_kms *mdp4_kms)
@@ -273,8 +276,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	kms = &mdp4_kms->base;
-	kms->funcs = &kms_funcs;
+	mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+
+	kms = &mdp4_kms->base.base;
 
 	mdp4_kms->dev = dev;
 
mdp/mdp4/mdp4_kms.h
@@ -23,22 +23,8 @@
 #include "mdp/mdp_kms.h"
 #include "mdp4.xml.h"
 
-
-/* For transiently registering for different MDP4 irqs that various parts
- * of the KMS code need during setup/configuration.  We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct mdp4_irq {
-	struct list_head node;
-	uint32_t irqmask;
-	bool registered;
-	void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
-};
-
 struct mdp4_kms {
-	struct msm_kms base;
+	struct mdp_kms base;
 
 	struct drm_device *dev;
 
@@ -57,11 +43,7 @@ struct mdp4_kms {
 	struct clk *pclk;
 	struct clk *lut_clk;
 
-	/* irq handling: */
-	bool in_irq;
-	struct list_head irq_list;    /* list of mdp4_irq */
-	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
-	struct mdp4_irq error_handler;
+	struct mdp_irq error_handler;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
@@ -166,13 +148,11 @@ static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
 irqreturn_t mdp4_irq(struct msm_kms *kms);
-void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
-void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
-void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
mdp/mdp4/mdp4_plane.c
@@ -34,7 +34,7 @@ struct mdp4_plane {
 static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
 	struct msm_drm_private *priv = plane->dev->dev_private;
-	return to_mdp4_kms(priv->kms);
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static int mdp4_plane_update(struct drm_plane *plane,
mdp/mdp_kms.c (new file)
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+	struct mdp_irq irq;
+	int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+	struct mdp_irq *irq;
+	uint32_t irqmask = mdp_kms->vblank_mask;
+
+	BUG_ON(!spin_is_locked(&list_lock));
+
+	list_for_each_entry(irq, &mdp_kms->irq_list, node)
+		irqmask |= irq->irqmask;
+
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&list_lock, flags);
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+	struct mdp_irq *handler, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	mdp_kms->in_irq = true;
+	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+		if (handler->irqmask & status) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			handler->irq(handler, handler->irqmask & status);
+			spin_lock_irqsave(&list_lock, flags);
+		}
+	}
+	mdp_kms->in_irq = false;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	if (enable)
+		mdp_kms->vblank_mask |= mask;
+	else
+		mdp_kms->vblank_mask &= ~mask;
+	update_irq(mdp_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp_irq_wait *wait =
+			container_of(irq, struct mdp_irq_wait, irq);
+	wait->count--;
+	wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+	struct mdp_irq_wait wait = {
+		.irq = {
+			.irq = wait_irq,
+			.irqmask = irqmask,
+		},
+		.count = 1,
+	};
+	mdp_irq_register(mdp_kms, &wait.irq);
+	wait_event(wait_event, (wait.count <= 0));
+	mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!irq->registered) {
+		irq->registered = true;
+		list_add(&irq->node, &mdp_kms->irq_list);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+	unsigned long flags;
+	bool needs_update = false;
+
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (irq->registered) {
+		irq->registered = false;
+		list_del(&irq->node);
+		needs_update = !mdp_kms->in_irq;
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	if (needs_update)
+		update_irq_unlocked(mdp_kms);
+}
mdp/mdp_kms.h
@@ -23,8 +23,64 @@
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
+#include "msm_kms.h"
 #include "mdp_common.xml.h"
 
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+	struct msm_kms_funcs base;
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+};
+
+struct mdp_kms {
+	struct msm_kms base;
+
+	const struct mdp_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list;    /* list of mdp4_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
+		const struct mdp_kms_funcs *funcs)
+{
+	mdp_kms->funcs = funcs;
+	INIT_LIST_HEAD(&mdp_kms->irq_list);
+	msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+	struct list_head node;
+	uint32_t irqmask;
+	bool registered;
+	void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+
+
+/*
+ * pixel format helpers:
+ */
+
 struct mdp_format {
 	struct msm_format base;
 	enum mdp_bpc bpc_r, bpc_g, bpc_b;
@@ -35,7 +91,6 @@ struct mdp_format {
 };
 #define to_mdp_format(x) container_of(x, struct mdp_format, base)
 
-
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats);
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 
msm_kms.h
@@ -49,8 +49,19 @@ struct msm_kms_funcs {
 
 struct msm_kms {
 	const struct msm_kms_funcs *funcs;
+
+	/* irq handling: */
+	bool in_irq;
+	struct list_head irq_list;    /* list of mdp4_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
 };
 
+static inline void msm_kms_init(struct msm_kms *kms,
+		const struct msm_kms_funcs *funcs)
+{
+	kms->funcs = funcs;
+}
+
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 
 #endif /* __MSM_KMS_H__ */