drm/msm/mdp5: Use the new hierarchy and drop old irq management
Call msm_mdss_init in msm_drv to set up the top level registers/irq line.
Start using the new kms_init2/destroy2 funcs to initialize MDP5 KMS. With
the MDSS interrupt and irqdomain set up, the old MDP5 irq code can be
dropped.

The mdp5_hw_init kms func now uses the platform device tied to MDP5
instead of the one tied to the drm_device/MDSS.

Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
commit 0a6030d224
parent aec095ecbc
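For context, the core of the MDP5 irq code being dropped below is a bit-dispatch loop: peel the highest set bit off the MDSS_HW_INTR_STATUS word with fls() and route that hw irq number through an irqdomain. After this change the same pattern lives at the MDSS level rather than in MDP5. The following is a small, self-contained userspace sketch of that loop, for illustration only; fls_u32(), handle_hwirq() and the example status value are invented for the demo and are not part of the driver.

/* Illustration only: the fls()-based dispatch loop removed from mdp5_irq()
 * in the diff below.  In the driver the bit is mapped through an irqdomain
 * and delivered with generic_handle_irq(); here it is just printed.
 */
#include <stdio.h>

static unsigned int fls_u32(unsigned int x)
{
	/* like the kernel's fls(): 1-based index of the most significant set bit */
	return x ? 32 - __builtin_clz(x) : 0;
}

static void handle_hwirq(unsigned int hwirq)
{
	printf("dispatch hw irq %u\n", hwirq);
}

int main(void)
{
	unsigned int intr = 0x00000116;	/* pretend interrupt status bits */

	while (intr) {
		unsigned int hwirq = fls_u32(intr) - 1;

		handle_hwirq(hwirq);
		intr &= ~(1u << hwirq);	/* clear the bit we just handled */
	}
	return 0;
}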
@@ -15,7 +15,6 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/irqdomain.h>
 #include <linux/irq.h>
 
 #include "msm_drv.h"
@@ -68,8 +67,9 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
 	mdp5_disable(mdp5_kms);
 }
 
-static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+irqreturn_t mdp5_irq(struct msm_kms *kms)
 {
+	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -87,29 +87,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 	for (id = 0; id < priv->num_crtcs; id++)
 		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
 			drm_handle_vblank(dev, id);
-}
-
-irqreturn_t mdp5_irq(struct msm_kms *kms)
-{
-	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
-	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
-	uint32_t intr;
-
-	intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
-
-	VERB("intr=%08x", intr);
-
-	if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
-		mdp5_irq_mdp(mdp_kms);
-		intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
-	}
-
-	while (intr) {
-		irq_hw_number_t hwirq = fls(intr) - 1;
-		generic_handle_irq(irq_find_mapping(
-				mdp5_kms->irqcontroller.domain, hwirq));
-		intr &= ~(1 << hwirq);
-	}
 
 	return IRQ_HANDLED;
 }
@@ -135,81 +112,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 			mdp5_crtc_vblank(crtc), false);
 	mdp5_disable(mdp5_kms);
 }
-
-/*
- * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
-		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
-		MDSS_HW_INTR_STATUS_INTR_HDMI | \
-		MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdp5_hw_mask_irq(struct irq_data *irqd)
-{
-	struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
-	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
-	smp_mb__after_atomic();
-}
-
-static void mdp5_hw_unmask_irq(struct irq_data *irqd)
-{
-	struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
-	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
-	smp_mb__after_atomic();
-}
-
-static struct irq_chip mdp5_hw_irq_chip = {
-	.name		= "mdp5",
-	.irq_mask	= mdp5_hw_mask_irq,
-	.irq_unmask	= mdp5_hw_unmask_irq,
-};
-
-static int mdp5_hw_irqdomain_map(struct irq_domain *d,
-		unsigned int irq, irq_hw_number_t hwirq)
-{
-	struct mdp5_kms *mdp5_kms = d->host_data;
-
-	if (!(VALID_IRQS & (1 << hwirq)))
-		return -EPERM;
-
-	irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
-	irq_set_chip_data(irq, mdp5_kms);
-
-	return 0;
-}
-
-static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
-	.map = mdp5_hw_irqdomain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
-
-int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
-{
-	struct device *dev = mdp5_kms->dev->dev;
-	struct irq_domain *d;
-
-	d = irq_domain_add_linear(dev->of_node, 32,
-			&mdp5_hw_irqdomain_ops, mdp5_kms);
-	if (!d) {
-		dev_err(dev, "mdp5 irq domain add failed\n");
-		return -ENXIO;
-	}
-
-	mdp5_kms->irqcontroller.enabled_mask = 0;
-	mdp5_kms->irqcontroller.domain = d;
-
-	return 0;
-}
-
-void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
-{
-	if (mdp5_kms->irqcontroller.domain) {
-		irq_domain_remove(mdp5_kms->irqcontroller.domain);
-		mdp5_kms->irqcontroller.domain = NULL;
-	}
-}
@@ -29,10 +29,10 @@ static const char *iommu_ports[] = {
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct drm_device *dev = mdp5_kms->dev;
+	struct platform_device *pdev = mdp5_kms->pdev;
 	unsigned long flags;
 
-	pm_runtime_get_sync(dev->dev);
+	pm_runtime_get_sync(&pdev->dev);
 
 	/* Magic unknown register writes:
 	 *
@@ -64,7 +64,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
 
 	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
 
-	pm_runtime_put_sync(dev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 
 	return 0;
 }
@@ -117,8 +117,6 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct msm_mmu *mmu = mdp5_kms->mmu;
 
-	mdp5_irq_domain_fini(mdp5_kms);
-
 	if (mmu) {
 		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
 		mmu->funcs->destroy(mmu);
@@ -160,7 +158,7 @@ static const struct mdp_kms_funcs kms_funcs = {
 		.get_format = mdp_get_format,
 		.round_pixclk = mdp5_round_pixclk,
 		.set_split_display = mdp5_set_split_display,
-		.destroy = mdp5_kms_destroy,
+		.destroy = mdp5_kms_destroy2,
 	},
 	.set_irqmask = mdp5_set_irqmask,
 };
@@ -357,13 +355,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-	/* register our interrupt-controller for hdmi/eDP/dsi/etc
-	 * to use for irqs routed through mdp:
-	 */
-	ret = mdp5_irq_domain_init(mdp5_kms);
-	if (ret)
-		goto fail;
-
 	/* construct CRTCs and their private planes: */
 	for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
 		struct drm_plane *plane;
@@ -237,6 +237,8 @@ static int msm_drm_uninit(struct device *dev)
 
 	component_unbind_all(dev, ddev);
 
+	msm_mdss_destroy(ddev);
+
 	ddev->dev_private = NULL;
 	drm_dev_unref(ddev);
 
@@ -351,6 +353,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	ddev->dev_private = priv;
 
+	ret = msm_mdss_init(ddev);
+	if (ret) {
+		kfree(priv);
+		drm_dev_unref(ddev);
+		return ret;
+	}
+
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
 	init_waitqueue_head(&priv->pending_crtcs_event);
@@ -365,6 +374,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	/* Bind all our sub-components: */
 	ret = component_bind_all(dev, ddev);
 	if (ret) {
+		msm_mdss_destroy(ddev);
 		kfree(priv);
 		drm_dev_unref(ddev);
 		return ret;
@@ -377,9 +387,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	switch (get_mdp_ver(pdev)) {
 	case 4:
 		kms = mdp4_kms_init(ddev);
+		priv->kms = kms;
 		break;
 	case 5:
-		kms = mdp5_kms_init(ddev);
+		kms = mdp5_kms_init2(ddev);
 		break;
 	default:
 		kms = ERR_PTR(-ENODEV);
@@ -398,8 +409,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 		goto fail;
 	}
 
-	priv->kms = kms;
-
 	if (kms) {
 		pm_runtime_enable(dev);
 		ret = kms->funcs->hw_init(kms);