drm/msm: Add SDM845 DPU support

The SDM845 SoC includes the Mobile Display Sub System (MDSS), which is a
top-level wrapper consisting of the Display Processing Unit (DPU) and
display peripheral modules such as the Display Serial Interface (DSI)
and DisplayPort (DP).

MDSS functions essentially as a back-end composition engine. It blends
video and graphic images stored in the frame buffers and scans out the
composed image to a display sink (over DSI/DP).

The following diagram represents hardware blocks for a simple pipeline
(two planes are present on a given crtc which is connected to a DSI
connector):

       MDSS
      +---------------------------------+
      | +-----------------------------+ |
      | | DPU                         | |
      | |  +--------+  +--------+     | |
      | |  |  SSPP  |  |  SSPP  |     | |
      | |  +----+---+  +----+---+     | |
      | |       |           |         | |
      | |  +----v-----------v---+     | |
      | |  |  Layer Mixer (LM)  |     | |
      | |  +--------------------+     | |
      | |  +--------------------+     | |
      | |  |    PingPong (PP)   |     | |
      | |  +--------------------+     | |
      | |  +--------------------+     | |
      | |  |  INTERFACE (VIDEO) |     | |
      | |  +---+----------------+     | |
      | +------|----------------------+ |
      |        |                        |
      | +------|---------------------+  |
      | |      | DISPLAY PERIPHERALS |  |
      | |  +---v-+      +-----+      |  |
      | |  | DSI |      |  DP |      |  |
      | |  +-----+      +-----+      |  |
      | +----------------------------+  |
      +---------------------------------+

The number of DPU sub-blocks (i.e. SSPPs, LMs, PP blocks and INTFs)
depends on SoC capabilities.

Overview of DPU sub-blocks:
---------------------------
* Source Surface Processor (SSPP):
 Refers to any of the hardware pipes (e.g. ViG, DMA). Only ViG pipes are
 capable of performing format conversion, scaling and quality improvement
 for source surfaces.

* Layer Mixer (LM):
 Blends source surfaces together (in the requested z-order).

* PingPong (PP):
 This block controls the frame-done interrupt output, EOL and EOF
 generation, and overflow/underflow control.

* Display interface (INTF):
 Timing generator and interface connecting the display peripherals.

DRM components mapping to DPU architecture:
------------------------------------------
PLANEs map to SSPPs
CRTCs map to LMs
ENCODERs map to PPs and INTFs
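
As a rough illustration only, the mapping can be pictured as the DRM
objects carrying the corresponding HW driver contexts added by this patch
(the example_* struct names below are hypothetical simplifications; the
real driver is more involved, e.g. encoders are split into physical
encoders):

    /* Hypothetical sketch, not the actual driver structures */
    struct example_dpu_plane {              /* PLANE   -> SSPP */
            struct drm_plane base;
            struct dpu_hw_pipe *hw_sspp;
    };

    struct example_dpu_crtc {               /* CRTC    -> LM(s) */
            struct drm_crtc base;
            struct dpu_hw_mixer *hw_lm[2];
    };

    struct example_dpu_encoder {            /* ENCODER -> PP + INTF */
            struct drm_encoder base;
            struct dpu_hw_pingpong *hw_pp;
            struct dpu_hw_intf *hw_intf;
    };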

Data flow setup:
---------------
MDSS hardware can support various data flows, for example:
  - Dual pipe: Output from two LMs combined to single display.
  - Split display: Output from two LMs connected to two separate
                   interfaces.

The hardware capabilities determine the number of concurrent data paths
possible. Any control path (i.e. a pipeline within the DPU) can be routed
to any of the hardware data paths. A given control path can be triggered,
flushed and controlled independently.
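
As a minimal sketch of the dual pipe case, each LM handles half of the
mode's active width (this mirrors what the dpu_crtc_get_mixer_width()
helper added by this patch computes; the function below is illustrative
only):

    /* Illustrative: active width driven by each LM in a given flow */
    static u32 example_lm_width(u32 mode_hdisplay, bool dual_pipe)
    {
            return dual_pipe ? mode_hdisplay / 2 : mode_hdisplay;
    }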

Changes in v3:
- Move msm_media_info.h from uapi to dpu/ subdir
- Remove preclose callback from dpu (it's handled in core)
- Fix kbuild warnings with parent_ops
- Remove unused functions from dpu_core_irq
- Rename mdss_phys to mdss
- Rename mdp_phys address space to mdp
- Drop _phys from vbif and regdma binding names

Signed-off-by: Abhinav Kumar <abhinavk@codeaurora.org>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Chandan Uddaraju <chandanu@codeaurora.org>
Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rajesh Yadav <ryadav@codeaurora.org>
Signed-off-by: Sravanthi Kollukuduru <skolluku@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
[robclark minor rebase]
Signed-off-by: Rob Clark <robdclark@gmail.com>
Author:    Jeykumar Sankaran
Date:      2018-06-27 15:26:09 -04:00
Committer: Sean Paul
Parent:    036bfeb33b
Commit:    25fdd5933e
64 changed files with 31913 additions and 15 deletions


@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -45,6 +46,34 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
disp/dpu1/dpu_core_irq.o \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
disp/dpu1/dpu_encoder_phys_cmd.o \
disp/dpu1/dpu_encoder_phys_vid.o \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
disp/dpu1/dpu_hw_lm.o \
disp/dpu1/dpu_hw_pingpong.o \
disp/dpu1/dpu_hw_sspp.o \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_kms_utils.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,7 +91,8 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o


@@ -0,0 +1,479 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include "dpu_core_irq.h"
#include "dpu_trace.h"
/**
* dpu_core_irq_callback_handler - dispatch core interrupts
* @arg: private data of callback handler
* @irq_idx: interrupt index
*/
static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
{
struct dpu_kms *dpu_kms = arg;
struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
struct dpu_irq_callback *cb;
unsigned long irq_flags;
pr_debug("irq_idx=%d\n", irq_idx);
if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
}
atomic_inc(&irq_obj->irq_counts[irq_idx]);
/*
* Perform registered function callback
*/
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
/*
* Clear pending interrupt status in HW.
* NOTE: dpu_core_irq_callback_handler is protected by top-level
* spinlock, so it is safe to clear any interrupt status here.
*/
dpu_kms->hw_intr->ops.clear_intr_status_nolock(
dpu_kms->hw_intr,
irq_idx);
}
int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
enum dpu_intr_type intr_type, u32 instance_idx)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
instance_idx);
}
/**
* _dpu_core_irq_enable - enable core interrupt given by the index
* @dpu_kms: Pointer to dpu kms context
* @irq_idx: interrupt index
*/
static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
int ret = 0, enable_count;
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->irq_obj.enable_counts ||
!dpu_kms->irq_obj.irq_counts) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
ret = dpu_kms->hw_intr->ops.enable_irq(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is enabled */
if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
DPU_ERROR("irq_idx=%d enabled with no callback\n",
irq_idx);
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
}
return ret;
}
int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
if (!dpu_kms || !irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
if (counts)
DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
for (i = 0; (i < irq_count) && !ret; i++)
ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
return ret;
}
/**
* _dpu_core_irq_disable - disable core interrupt given by the index
* @dpu_kms: Pointer to dpu kms context
* @irq_idx: interrupt index
*/
static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
{
int ret = 0, enable_count;
if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
ret = dpu_kms->hw_intr->ops.disable_irq(
dpu_kms->hw_intr,
irq_idx);
if (ret)
DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
irq_idx);
DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
return ret;
}
int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0, counts;
if (!dpu_kms || !irq_idxs || !irq_count) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
if (counts == 2)
DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
for (i = 0; (i < irq_count) && !ret; i++)
ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
return ret;
}
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.get_interrupt_status)
return 0;
if (irq_idx < 0) {
DPU_ERROR("[%pS] invalid irq_idx=%d\n",
__builtin_return_address(0), irq_idx);
return 0;
}
return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
irq_idx, clear);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
{
unsigned long irq_flags;
if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
return 0;
}
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
struct dpu_irq_callback *register_irq_cb)
{
unsigned long irq_flags;
if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
DPU_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */
if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
return 0;
}
static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.clear_all_irqs)
return;
dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
}
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
if (!dpu_kms || !dpu_kms->hw_intr ||
!dpu_kms->hw_intr->ops.disable_all_irqs)
return;
dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
}
#ifdef CONFIG_DEBUG_FS
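/*
 * Expands to a single_open()-style _open() helper plus the file_operations
 * glue for a matching _show() function.
 */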
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
.read = seq_read, \
.llseek = seq_lseek, \
}
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_irq *irq_obj = s->private;
struct dpu_irq_callback *cb;
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
DPU_ERROR("invalid parameters\n");
return 0;
}
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
cb_count = 0;
irq_count = atomic_read(&irq_obj->irq_counts[i]);
enable_count = atomic_read(&irq_obj->enable_counts[i]);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
cb_count++;
spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
if (irq_count || enable_count || cb_count)
seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
i, irq_count, enable_count, cb_count);
}
return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
parent, &dpu_kms->irq_obj,
&dpu_debugfs_core_irq_fops);
return 0;
}
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
debugfs_remove(dpu_kms->irq_obj.debugfs_file);
dpu_kms->irq_obj.debugfs_file = NULL;
}
#else
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
return 0;
}
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
{
}
#endif
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
} else if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device private\n");
return;
}
priv = dpu_kms->dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_clear_all_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
spin_lock_init(&dpu_kms->irq_obj.cb_lock);
/* Create irq callbacks for all possible irq_idx */
dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(struct list_head), GFP_KERNEL);
dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
}
}
int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
{
return 0;
}
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
if (!dpu_kms) {
DPU_ERROR("invalid dpu_kms\n");
return;
} else if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device private\n");
return;
}
priv = dpu_kms->dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_all_irqs(dpu_kms);
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
kfree(dpu_kms->irq_obj.irq_cb_tbl);
kfree(dpu_kms->irq_obj.enable_counts);
kfree(dpu_kms->irq_obj.irq_counts);
dpu_kms->irq_obj.irq_cb_tbl = NULL;
dpu_kms->irq_obj.enable_counts = NULL;
dpu_kms->irq_obj.irq_counts = NULL;
dpu_kms->irq_obj.total_irqs = 0;
}
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
/*
* Read interrupt status from all sources. Interrupt status are
* stored within hw_intr.
* Function will also clear the interrupt status after reading.
* Individual interrupt status bit will only get stored if it
* is enabled.
*/
dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
/*
* Dispatch to HW driver to handle interrupt lookup that is being
* fired. When matching interrupt is located, HW driver will call to
* dpu_core_irq_callback_handler with the irq_idx from the lookup table.
* dpu_core_irq_callback_handler will perform the registered function
* callback, and do the interrupt status clearing once the registered
* callback is finished.
*/
dpu_kms->hw_intr->ops.dispatch_irqs(
dpu_kms->hw_intr,
dpu_core_irq_callback_handler,
dpu_kms);
return IRQ_HANDLED;
}


@@ -0,0 +1,153 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DPU_CORE_IRQ_H__
#define __DPU_CORE_IRQ_H__
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
/**
* dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
* @dpu_kms: DPU handle
* @return: none
*/
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_postinstall - perform post-installation of core IRQ handler
* @dpu_kms: DPU handle
* @return: 0 if success; error code otherwise
*/
int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @return: none
*/
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq - core IRQ handler
* @dpu_kms: DPU handle
* @return: interrupt handling status
*/
irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
/**
* dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx from the
* HW interrupt mapping table.
* @dpu_kms: DPU handle
* @intr_type: DPU HW interrupt type for lookup
* @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
* @return: irq_idx, or -EINVAL when the lookup fails
*/
int dpu_core_irq_idx_lookup(
struct dpu_kms *dpu_kms,
enum dpu_intr_type intr_type,
uint32_t instance_idx);
/**
* dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
* @dpu_kms: DPU handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success enabling IRQ, otherwise failure
*
* This function increments the count on each enable and decrements on each
* disable. The interrupt is enabled if the count is 0 before the increment.
*/
int dpu_core_irq_enable(
struct dpu_kms *dpu_kms,
int *irq_idxs,
uint32_t irq_count);
/**
* dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
* @dpu_kms: DPU handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success disabling IRQ, otherwise failure
*
* This function increments the count on each enable and decrements on each
* disable. The interrupt is disabled if the count is 0 after the decrement.
*/
int dpu_core_irq_disable(
struct dpu_kms *dpu_kms,
int *irq_idxs,
uint32_t irq_count);
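/*
 * Minimal usage sketch of the lookup + enable pair. Hypothetical caller;
 * the DPU_IRQ_TYPE_INTF_VSYNC / INTF_1 pair is for illustration only.
 */
static inline int dpu_core_irq_example_enable_vsync(struct dpu_kms *dpu_kms)
{
	/* map the (type, instance) pair to the flat irq_idx used below */
	int irq_idx = dpu_core_irq_idx_lookup(dpu_kms,
			DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);

	if (irq_idx < 0)
		return irq_idx;

	/* reference counted: HW is only touched on the 0 -> 1 transition */
	return dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
}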
/**
* dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @clear: True to clear the irq after read
* @return: non-zero if an irq was detected; zero otherwise
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
int irq_idx,
bool clear);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must exist until un-registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
/**
* dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must match with registration.
* @return: 0 for success registering callback, otherwise failure
*
* Multiple callbacks may be registered per interrupt; this removes only the
* given callback.
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
struct dpu_irq_callback *irq_cb);
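/*
 * Hypothetical registration sketch: @cb typically lives in the caller's
 * context and must stay valid until unregistration; its list node must be
 * initialised before first use.
 */
static inline int dpu_core_irq_example_register(struct dpu_kms *dpu_kms,
		int irq_idx, struct dpu_irq_callback *cb,
		void (*func)(void *arg, int irq_idx), void *arg)
{
	cb->func = func;
	cb->arg = arg;
	INIT_LIST_HEAD(&cb->list);

	return dpu_core_irq_register_callback(dpu_kms, irq_idx, cb);
}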
/**
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
* @Return: 0 on success
*/
int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
/**
* dpu_debugfs_core_irq_destroy - deregister core irq debugfs
* @dpu_kms: pointer to kms
*/
void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
#endif /* __DPU_CORE_IRQ_H__ */


@@ -0,0 +1,637 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include "dpu_kms.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
#define DPU_PERF_MODE_STRING_SIZE 128
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
* @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
* @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
*/
enum dpu_perf_mode {
DPU_PERF_MODE_NORMAL,
DPU_PERF_MODE_MINIMUM,
DPU_PERF_MODE_FIXED,
DPU_PERF_MODE_MAX
};
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
if (!crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid device\n");
return NULL;
}
priv = crtc->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid kms\n");
return NULL;
}
return to_dpu_kms(priv->kms);
}
static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
{
return dpu_crtc_is_enabled(crtc);
}
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
bool intf_connected = false;
if (!crtc)
goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
intf_connected = true;
goto end;
}
}
end:
return intf_connected;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct dpu_core_perf_params *perf)
{
struct dpu_crtc_state *dpu_cstate;
int i;
if (!kms || !kms->catalog || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
return;
}
dpu_cstate = to_dpu_crtc_state(state);
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
}
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
enum dpu_crtc_client_type curr_client_type;
bool is_video_mode;
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
int i;
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
DPU_ERROR("invalid parameters\n");
return 0;
}
/* we only need bandwidth check on real-time clients (interfaces) */
if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
return 0;
dpu_cstate = to_dpu_crtc_state(state);
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id,
tmp_cstate->new_perf.bw_ctl[i],
tmp_cstate->bw_control);
/*
* For bw check only use the bw if the
* atomic property has been already set
*/
if (tmp_cstate->bw_control)
bw_sum_of_intfs +=
tmp_cstate->new_perf.bw_ctl[i];
}
}
/* convert bandwidth to kb */
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DPU_DEBUG("calculated bandwidth=%uk\n", bw);
is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
threshold = (is_video_mode ||
_dpu_core_video_mode_intf_connected(crtc)) ?
kms->catalog->perf.max_bw_low :
kms->catalog->perf.max_bw_high;
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
if (!dpu_cstate->bw_control) {
DPU_DEBUG("bypass bandwidth check\n");
} else if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
threshold);
return -E2BIG;
}
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
struct drm_crtc *crtc, u32 bus_id)
{
struct dpu_core_perf_params perf = { { 0 } };
enum dpu_crtc_client_type curr_client_type
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
struct dpu_crtc_state *dpu_cstate;
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
perf.max_per_pipe_ib[bus_id] =
max(perf.max_per_pipe_ib[bus_id],
dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
tmp_crtc->base.id, bus_id,
dpu_cstate->new_perf.bw_ctl[bus_id]);
}
}
return ret;
}
/**
* dpu_core_perf_crtc_release_bw() - request zero bandwidth
* @crtc: pointer to a crtc
*
* Function checks a state variable for the crtc; if all pending commit
* requests are done, meaning no more bandwidth is needed, it releases the
* bandwidth request.
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
int i;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
DPU_ERROR("invalid kms\n");
return;
}
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
/* only do this for command mode rt client */
if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
return;
/*
* If video interface present, cmd panel bandwidth cannot be
* released.
*/
if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
dpu_crtc_get_intf_mode(tmp_crtc) ==
INTF_MODE_VIDEO)
return;
}
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
}
}
static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
{
struct dss_clk *core_clk = kms->perf.core_clk;
if (core_clk->max_rate && (rate > core_clk->max_rate))
rate = core_clk->max_rate;
core_clk->rate = rate;
return msm_dss_clk_set_rate(core_clk, 1);
}
static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
{
u64 clk_rate = kms->perf.perf_tune.min_core_clk;
struct drm_crtc *crtc;
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
if (_dpu_core_perf_crtc_is_power_on(crtc)) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk->clk,
clk_rate);
}
}
if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
clk_rate = kms->perf.fix_core_clk_rate;
DPU_DEBUG("clk:%llu\n", clk_rate);
return clk_rate;
}
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed, bool stop_req)
{
struct dpu_core_perf_params *new, *old;
int update_bus = 0, update_clk = 0;
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
int i;
struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return -EINVAL;
}
kms = _dpu_crtc_get_kms(crtc);
if (!kms || !kms->catalog) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
priv = kms->dev->dev_private;
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
crtc->base.id, stop_req, kms->perf.core_clk_rate);
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
* than current vote for update request.
* 2. new bandwidth vote - "ab or ib vote" is lower
* than current vote at end of commit or stop.
*/
if ((params_changed && ((new->bw_ctl[i] >
old->bw_ctl[i]) ||
(new->max_per_pipe_ib[i] >
old->max_per_pipe_ib[i]))) ||
(!params_changed && ((new->bw_ctl[i] <
old->bw_ctl[i]) ||
(new->max_per_pipe_ib[i] <
old->max_per_pipe_ib[i])))) {
DPU_DEBUG(
"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl[i], old->bw_ctl[i]);
old->bw_ctl[i] = new->bw_ctl[i];
old->max_per_pipe_ib[i] =
new->max_per_pipe_ib[i];
update_bus |= BIT(i);
}
}
if ((params_changed &&
(new->core_clk_rate > old->core_clk_rate)) ||
(!params_changed &&
(new->core_clk_rate < old->core_clk_rate))) {
old->core_clk_rate = new->core_clk_rate;
update_clk = 1;
}
} else {
DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
memset(new, 0, sizeof(*new));
update_bus = ~0;
update_clk = 1;
}
trace_dpu_perf_crtc_update(crtc->base.id,
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
crtc->base.id, i);
return ret;
}
}
}
/*
* Update the clock after bandwidth vote to ensure
* bandwidth is available before clock rate is increased.
*/
if (update_clk) {
clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
if (ret) {
DPU_ERROR("failed to set %s clock rate %llu\n",
kms->perf.core_clk->clk_name, clk_rate);
return ret;
}
kms->perf.core_clk_rate = clk_rate;
DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
}
return 0;
}
#ifdef CONFIG_DEBUG_FS
static ssize_t _dpu_core_perf_mode_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
char buf[10];
if (!perf)
return -ENODEV;
if (count >= sizeof(buf))
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (kstrtouint(buf, 0, &perf_mode))
return -EFAULT;
if (perf_mode >= DPU_PERF_MODE_MAX)
return -EFAULT;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
/* run the driver with max clk and BW vote */
perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
perf->perf_tune.min_bus_vote =
(u64) cfg->max_bw_high * 1000;
DRM_INFO("minimum performance mode\n");
} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
/* reset the perf tune params to 0 */
perf->perf_tune.min_core_clk = 0;
perf->perf_tune.min_bus_vote = 0;
DRM_INFO("normal performance mode\n");
}
perf->perf_tune.mode = perf_mode;
return count;
}
static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
int len = 0;
char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
if (!perf)
return -ENODEV;
if (*ppos)
return 0; /* the end */
len = snprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
if (len < 0 || len >= sizeof(buf))
return 0;
if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
return len;
}
static const struct file_operations dpu_core_perf_mode_fops = {
.open = simple_open,
.read = _dpu_core_perf_mode_read,
.write = _dpu_core_perf_mode_write,
};
static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
{
debugfs_remove_recursive(perf->debugfs_root);
perf->debugfs_root = NULL;
}
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent)
{
struct dpu_mdss_cfg *catalog = perf->catalog;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
priv = perf->dev->dev_private;
if (!priv || !priv->kms) {
DPU_ERROR("invalid KMS reference\n");
return -EINVAL;
}
dpu_kms = to_dpu_kms(priv->kms);
perf->debugfs_root = debugfs_create_dir("core_perf", parent);
if (!perf->debugfs_root) {
DPU_ERROR("failed to create core perf debugfs\n");
return -EINVAL;
}
debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
&perf->max_core_clk_rate);
debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
(u32 *)&perf->enable_bw_release);
debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
(u32 *)&catalog->perf.max_bw_low);
debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
(u32 *)&catalog->perf.max_bw_high);
debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
(u32 *)&catalog->perf.min_core_ib);
debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
(u32 *)&catalog->perf.min_llcc_ib);
debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
(u32 *)&catalog->perf.min_dram_ib);
debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
&perf->fix_core_clk_rate);
debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
&perf->fix_core_ib_vote);
debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
&perf->fix_core_ab_vote);
return 0;
}
#else
static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
{
}
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent)
{
return 0;
}
#endif
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
{
if (!perf) {
DPU_ERROR("invalid parameters\n");
return;
}
dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
if (!perf->max_core_clk_rate) {
DPU_DEBUG("optional max core clk rate, use default\n");
perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
}
return 0;
}


@@ -0,0 +1,133 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_CORE_PERF_H_
#define _DPU_CORE_PERF_H_
#include <linux/types.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
u64 core_clk_rate;
};
/**
* struct dpu_core_perf_tune - definition of performance tuning control
* @mode: performance mode
* @min_core_clk: minimum core clock
* @min_bus_vote: minimum bus vote
*/
struct dpu_core_perf_tune {
u32 mode;
u64 min_core_clk;
u64 min_bus_vote;
};
/**
* struct dpu_core_perf - definition of core performance context
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
* @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
* @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
* @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
*/
struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
struct dpu_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
u64 fix_core_ib_vote;
u64 fix_core_ab_vote;
};
/**
* dpu_core_perf_crtc_check - validate performance of the given crtc state
* @crtc: Pointer to crtc
* @state: Pointer to new crtc state
* return: zero if success, or error code otherwise
*/
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
/**
* dpu_core_perf_crtc_update - update performance of the given crtc
* @crtc: Pointer to crtc
* @params_changed: true if crtc parameters are modified
* @stop_req: true if this is a stop request
* return: zero if success, or error code otherwise
*/
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed, bool stop_req);
/**
* dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
* @crtc: Pointer to crtc
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
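/*
 * Hypothetical sketch of where the two calls above sit in the atomic flow;
 * the example_* function names are illustrative, not part of this patch.
 */
static inline int example_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* fail the check if the aggregated bandwidth exceeds catalog limits */
	return dpu_core_perf_crtc_check(crtc, state);
}

static inline void example_crtc_atomic_flush(struct drm_crtc *crtc)
{
	/*
	 * params_changed=1 raises clock/bandwidth votes before the frame;
	 * a later call with params_changed=0 lets them drop once the commit
	 * completes.
	 */
	dpu_core_perf_crtc_update(crtc, 1, false);
}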
/**
* dpu_core_perf_destroy - destroy the given core performance context
* @perf: Pointer to core performance context
*/
void dpu_core_perf_destroy(struct dpu_core_perf *perf);
/**
* dpu_core_perf_init - initialize the given core performance context
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
* @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
* @perf: Pointer to core performance context
* @parent: Pointer to parent debugfs
*/
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */

File diff suppressed because it is too large


@@ -0,0 +1,484 @@
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _DPU_CRTC_H_
#define _DPU_CRTC_H_
#include <linux/kthread.h>
#include <drm/drm_crtc.h>
#include "dpu_kms.h"
#include "dpu_core_perf.h"
#include "dpu_hw_blk.h"
#define DPU_CRTC_NAME_SIZE 12
/* define the maximum number of in-flight frame events */
#define DPU_CRTC_FRAME_EVENT_SIZE 4
/**
* enum dpu_crtc_client_type: crtc client type
* @RT_CLIENT: RealTime client like video/cmd mode display
* voting through apps rsc
* @NRT_CLIENT: Non-RealTime client like WB display
* voting through apps rsc
*/
enum dpu_crtc_client_type {
RT_CLIENT,
NRT_CLIENT,
};
/**
* enum dpu_crtc_smmu_state: smmu state
* @ATTACHED: all the context banks are attached.
* @DETACHED: all the context banks are detached.
* @ATTACH_ALL_REQ: transient state of attaching context banks.
* @DETACH_ALL_REQ: transient state of detaching context banks.
*/
enum dpu_crtc_smmu_state {
ATTACHED = 0,
DETACHED,
ATTACH_ALL_REQ,
DETACH_ALL_REQ,
};
/**
* enum dpu_crtc_smmu_state_transition_type: state transition type
* @NONE: no pending state transitions
* @PRE_COMMIT: state transitions should be done before processing the commit
* @POST_COMMIT: state transitions to be done after processing the commit.
*/
enum dpu_crtc_smmu_state_transition_type {
NONE,
PRE_COMMIT,
POST_COMMIT
};
/**
* struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
* @state: current state of smmu context banks
* @transition_type: transition request type
* @transition_error: whether there is error while transitioning the state
*/
struct dpu_crtc_smmu_state_data {
uint32_t state;
uint32_t transition_type;
uint32_t transition_error;
};
/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @hw_ctl: CTL Path HW driver context
* @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *hw_ctl;
struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
};
/**
* struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
* @work: base work structure
* @crtc: Pointer to crtc handling this event
* @list: event list
* @ts: timestamp at queue entry
* @event: event identifier
*/
struct dpu_crtc_frame_event {
struct kthread_work work;
struct drm_crtc *crtc;
struct list_head list;
ktime_t ts;
u32 event;
};
/**
* struct dpu_crtc_event - event callback tracking structure
* @list: Linked list tracking node
* @kt_work: Kthread worker structure
* @dpu_crtc: Pointer to associated dpu_crtc structure
* @cb_func: Pointer to callback function
* @usr: Pointer to user data to be provided to the callback
*/
struct dpu_crtc_event {
struct list_head list;
struct kthread_work kt_work;
void *dpu_crtc;
void (*cb_func)(struct drm_crtc *crtc, void *usr);
void *usr;
};
/*
* Maximum number of free event structures to cache
*/
#define DPU_CRTC_MAX_EVENT_COUNT 16
/**
* struct dpu_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
* @num_ctls : Number of ctl paths in use
* @num_mixers : Number of mixers in use
* @mixers_swapped: Whether the mixers have been swapped for left/right update
* especially in the case of DSC Merge.
* @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
* @drm_requested_vblank : Whether vblanks have been enabled in the encoder
* @property_info : Opaque structure for generic property support
* @property_defaults : Array of default values for generic property support
* @stage_cfg : H/w mixer stage configuration
* @debugfs_root : Parent of debugfs node
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
* @vblank_requested : whether the user has requested vblank events
* @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features that are active
* @dirty_list : list of color processing features that are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
* @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
* @spin_lock : spin lock for frame event, transaction status, etc...
* @frame_done_comp : for frame_event_done synchronization
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_cache : Local cache of event worker structures
* @event_free_list : List of available event structures
* @event_lock : Spinlock around event handling code
* @misr_enable : boolean entry indicates misr enable/disable status.
* @misr_frame_count : misr frame count provided by client
* @misr_data : store misr data before turning off the clocks.
* @phandle: Pointer to power handler
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
* @rp_lock : serialization lock for resource pool
* @rp_head : list of active resource pool
* @scl3_lut_cfg : qseed3 lut config
*/
struct dpu_crtc {
struct drm_crtc base;
char name[DPU_CRTC_NAME_SIZE];
/* HW Resources reserved for the crtc */
u32 num_ctls;
u32 num_mixers;
bool mixers_swapped;
struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
struct drm_pending_vblank_event *event;
u32 vsync_count;
struct dpu_hw_stage_cfg stage_cfg;
struct dentry *debugfs_root;
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
bool vblank_requested;
bool suspend;
bool enabled;
struct list_head feature_list;
struct list_head active_list;
struct list_head dirty_list;
struct list_head ad_dirty;
struct list_head ad_active;
struct mutex crtc_lock;
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
struct completion frame_done_comp;
/* for handling internal event thread */
struct dpu_crtc_event event_cache[DPU_CRTC_MAX_EVENT_COUNT];
struct list_head event_free_list;
spinlock_t event_lock;
bool misr_enable;
u32 misr_frame_count;
u32 misr_data[CRTC_DUAL_MIXERS];
struct dpu_power_handle *phandle;
struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
struct mutex rp_lock;
struct list_head rp_head;
struct dpu_crtc_smmu_state_data smmu_state;
};
#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
/**
* struct dpu_crtc_res_ops - common operations for crtc resources
* @get: get given resource
* @put: put given resource
*/
struct dpu_crtc_res_ops {
void *(*get)(void *val, u32 type, u64 tag);
void (*put)(void *val);
};
#define DPU_CRTC_RES_FLAG_FREE BIT(0)
/**
* struct dpu_crtc_res - definition of crtc resources
* @list: list of crtc resource
* @type: crtc resource type
* @tag: unique identifier per type
* @refcount: reference/usage count
* @ops: callback operations
* @val: resource handle associated with type/tag
* @flags: customization flags
*/
struct dpu_crtc_res {
struct list_head list;
u32 type;
u64 tag;
atomic_t refcount;
struct dpu_crtc_res_ops ops;
void *val;
u32 flags;
};
/**
* struct dpu_crtc_respool - crtc resource pool
* @rp_lock: pointer to serialization lock
* @rp_head: pointer to head of active resource pools of this crtc
* @rp_list: list of crtc resource pool
* @sequence_id: sequence identifier, incremented per state duplication
* @res_list: list of resource managed by this resource pool
* @ops: resource operations for parent resource pool
*/
struct dpu_crtc_respool {
struct mutex *rp_lock;
struct list_head *rp_head;
struct list_head rp_list;
u32 sequence_id;
struct list_head res_list;
struct dpu_crtc_res_ops ops;
};
/**
* struct dpu_crtc_state - dpu container for atomic crtc state
* @base: Base drm crtc state structure
* @is_ppsplit : Whether current topology requires PPSplit special handling
* @bw_control : true if bw/clk controlled by core bw/clk properties
* @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
* Origin top left of CRTC.
* @property_state: Local storage for msm_prop properties
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @new_perf: new performance state being requested
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
bool bw_control;
bool bw_split_vote;
bool is_ppsplit;
struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
struct dpu_core_perf_params new_perf;
struct dpu_crtc_respool rp;
};
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
* dpu_crtc_get_mixer_width - get the mixer width
* Mixer width will be the same as the panel width (/2 for split)
*/
static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
{
u32 mixer_width;
if (!dpu_crtc || !cstate || !mode)
return 0;
mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
return mixer_width;
}
/**
* dpu_crtc_get_mixer_height - get the mixer height
* Mixer height will be same as panel height
*/
static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
{
if (!dpu_crtc || !cstate || !mode)
return 0;
return mode->vdisplay;
}
/**
* dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc;
if (!crtc)
return -EINVAL;
dpu_crtc = to_dpu_crtc(crtc);
return atomic_read(&dpu_crtc->frame_pending);
}
/**
* dpu_crtc_vblank - enable or disable vblanks for this crtc
* @crtc: Pointer to drm crtc object
* @en: true to enable vblanks, false to disable
*/
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
*/
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
* @crtc: Pointer to drm crtc object
* @old_state: Pointer to drm crtc old state object
*/
void dpu_crtc_complete_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_state);
/**
* dpu_crtc_init - create a new crtc object
* @dev: dpu device
* @plane: base plane
* @Return: new crtc object or error
*/
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
/**
* dpu_crtc_register_custom_event - api for enabling/disabling crtc event
* @kms: Pointer to dpu_kms
* @crtc_drm: Pointer to crtc object
* @event: Event that the client is interested in
* @en: Flag to enable/disable the event
*/
int dpu_crtc_register_custom_event(struct dpu_kms *kms,
struct drm_crtc *crtc_drm, u32 event, bool en);
/**
* dpu_crtc_get_intf_mode - get interface mode of the given crtc
* @crtc: Pointer to crtc
*/
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
/**
* dpu_crtc_get_client_type - check the crtc type: rt, nrt, etc.
* @crtc: Pointer to crtc
*/
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
struct dpu_crtc_state *cstate =
crtc ? to_dpu_crtc_state(crtc->state) : NULL;
if (!cstate)
return NRT_CLIENT;
return RT_CLIENT;
}
/**
* dpu_crtc_is_enabled - check if dpu crtc is enabled or not
* @crtc: Pointer to crtc
*/
static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
{
return crtc ? crtc->enabled : false;
}
/**
* dpu_crtc_event_queue - request event callback
* @crtc: Pointer to drm crtc structure
* @func: Pointer to callback function
* @usr: Pointer to user data to be passed to callback
* Returns: Zero on success
*/
int dpu_crtc_event_queue(struct drm_crtc *crtc,
void (*func)(struct drm_crtc *crtc, void *usr), void *usr);
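/*
 * Hypothetical usage sketch (example_crtc_event_cb and the completion are
 * illustrative only); the callback runs later on the crtc's event thread
 * with @usr passed back:
 *
 *	static void example_crtc_event_cb(struct drm_crtc *crtc, void *usr)
 *	{
 *		complete(usr);
 *	}
 *
 *	dpu_crtc_event_queue(crtc, example_crtc_event_cb, &done);
 */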
/**
* dpu_crtc_res_add - add given resource to resource pool in crtc state
* @state: Pointer to drm crtc state
* @type: Resource type
* @tag: Search tag for given resource
* @val: Resource handle
* @ops: Resource callback operations
* return: 0 if success; error code otherwise
*/
int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
void *val, struct dpu_crtc_res_ops *ops);
/**
* dpu_crtc_res_get - get given resource from resource pool in crtc state
* @state: Pointer to drm crtc state
* @type: Resource type
* @tag: Search tag for given resource
* return: Resource handle if success; pointer error or null otherwise
*/
void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
/**
* dpu_crtc_res_put - return given resource to resource pool in crtc state
* @state: Pointer to drm crtc state
* @type: Resource type
* @tag: Search tag for given resource
* return: None
*/
void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
#endif /* _DPU_CRTC_H_ */

[diff suppressed: file too large]

@@ -0,0 +1,103 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef DPU_DBG_H_
#define DPU_DBG_H_
#include <stdarg.h>
#include <linux/debugfs.h>
#include <linux/list.h>
enum dpu_dbg_dump_flag {
DPU_DBG_DUMP_IN_LOG = BIT(0),
DPU_DBG_DUMP_IN_MEM = BIT(1),
};
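/*
* Note: the flags above are bit values and may be OR'd together, e.g.
* (DPU_DBG_DUMP_IN_LOG | DPU_DBG_DUMP_IN_MEM) requests the dump both in
* the kernel log and in memory.
*/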
#if defined(CONFIG_DEBUG_FS)
/**
* dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
* @hwversion: Chipset revision
*/
void dpu_dbg_init_dbg_buses(u32 hwversion);
/**
* dpu_dbg_init - initialize global dpu debug facilities: regdump
* @dev: device handle
* Returns: 0 or -ERROR
*/
int dpu_dbg_init(struct device *dev);
/**
* dpu_dbg_debugfs_register - register entries at the given debugfs dir
* @debugfs_root: debugfs root in which to create dpu debug entries
* Returns: 0 or -ERROR
*/
int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
/**
* dpu_dbg_destroy - destroy the global dpu debug facilities
* Returns: none
*/
void dpu_dbg_destroy(void);
/**
* dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
* @queue_work: whether to queue the dumping work to the work_struct
* @name: string indicating origin of dump
* @dump_dbgbus_dpu: dump the dpu debug bus
* @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
* Returns: none
*/
void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
bool dump_dbgbus_vbif_rt);
/**
* dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
* address of the top registers. Used for accessing debug bus controls.
* @blk_off: offset from mdss base of the top block
*/
void dpu_dbg_set_dpu_top_offset(u32 blk_off);
#else
static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
{
}
static inline int dpu_dbg_init(struct device *dev)
{
return 0;
}
static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
{
return 0;
}
static inline void dpu_dbg_destroy(void)
{
}
static inline void dpu_dbg_dump(bool queue_work, const char *name,
bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
{
}
static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
{
}
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* DPU_DBG_H_ */

[diff suppressed: file too large]

@@ -0,0 +1,191 @@
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DPU_ENCODER_H__
#define __DPU_ENCODER_H__
#include <drm/drm_crtc.h>
#include "dpu_hw_mdss.h"
#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
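/*
* Illustrative note: the frame events above are bit flags, so a single
* callback invocation may carry a combined mask which should be tested
* bit by bit, e.g. (event & DPU_ENCODER_FRAME_EVENT_ERROR).
*/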
#define IDLE_TIMEOUT (66 - 16/2)
/**
* struct dpu_encoder_hw_resources - hardware resources this encoder is using
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
* @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
* @display_num_of_h_tiles: Number of horizontal tiles in case of split
* interface
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
bool needs_cdm;
u32 display_num_of_h_tiles;
};
/**
* dpu_encoder_kickoff_params - info encoder requires at kickoff
* @affected_displays: bitmask, bit set means the ROI of the commit lies within
* the bounds of the physical display at the bit index
*/
struct dpu_encoder_kickoff_params {
unsigned long affected_displays;
};
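/*
* Example (illustrative): on a dual-DSI split panel, a commit whose ROI
* touches only the left half would set affected_displays = BIT(0), while
* a full-screen commit would set BIT(0) | BIT(1).
*/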
/**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
* @conn_state: report hw reqs based on this proposed connector state
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state);
/**
* dpu_encoder_register_vblank_callback - provide callback to encoder that
* will be called on the next vblank.
* @encoder: encoder pointer
* @cb: callback pointer, provide NULL to deregister and disable IRQs
* @data: user data provided to callback
*/
void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
void (*cb)(void *), void *data);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
* will be called after the request is complete, or other events.
* @encoder: encoder pointer
* @cb: callback pointer, provide NULL to deregister
* @data: user data provided to callback
*/
void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
void (*cb)(void *, u32), void *data);
/**
* dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
* path (i.e. ctl flush and start) at next appropriate time.
* Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
*/
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
struct dpu_encoder_kickoff_params *params);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
* kickoff and trigger the ctl prepare progress for command mode display.
* @encoder: encoder pointer
*/
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
/**
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
*/
void dpu_encoder_kickoff(struct drm_encoder *encoder);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
* @encoder: encoder pointer
* @event: event to wait for
* MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
* frames to hardware at a vblank or ctl_start
* Encoders will map this differently depending on the
* panel type.
* vid mode -> vsync_irq
* cmd mode -> ctl_start
* MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
* the panel. Encoders will map this differently
* depending on the panel type.
* vid mode -> vsync_irq
* cmd mode -> pp_done
* Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
*/
int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum msm_event_wait event);
/**
* dpu_encoder_get_intf_mode - get interface mode of the given encoder
* @encoder: Pointer to drm encoder object
*/
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
* dpu_encoder_virt_restore - restore the encoder configs
* @encoder: encoder pointer
*/
void dpu_encoder_virt_restore(struct drm_encoder *encoder);
/**
* dpu_encoder_check_mode - check if the given mode capability is supported
* @drm_enc: Pointer to drm encoder object
* @mode: Mode capability flag to be checked (e.g. a MSM_DISPLAY_CAP_* flag)
* @Return: true if the given mode capability is supported
*/
bool dpu_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode);
/**
* dpu_encoder_init - initialize virtual encoder object
* @dev: Pointer to drm device structure
* @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
* Returns: Pointer to newly created drm encoder
*/
struct drm_encoder *dpu_encoder_init(
struct drm_device *dev,
int drm_enc_mode);
/**
* dpu_encoder_setup - setup dpu_encoder for the display probed
* @dev: Pointer to drm device structure
* @enc: Pointer to the drm_encoder
* @disp_info: Pointer to the display info
*/
int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
struct msm_display_info *disp_info);
/**
* dpu_encoder_destroy - destroy previously initialized virtual encoder
* @drm_enc: Pointer to previously created drm encoder structure
*/
void dpu_encoder_destroy(struct drm_encoder *drm_enc);
/**
* dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
* atomic commit, before any registers are written
* @drm_enc: Pointer to previously created drm encoder structure
*/
void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
/**
* dpu_encoder_set_idle_timeout - set the idle timeout for video
* and command mode encoders.
* @drm_enc: Pointer to previously created drm encoder structure
* @idle_timeout: idle timeout duration in milliseconds
*/
void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
u32 idle_timeout);
#endif /* __DPU_ENCODER_H__ */

@@ -0,0 +1,453 @@
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __DPU_ENCODER_PHYS_H__
#define __DPU_ENCODER_PHYS_H__
#include <linux/jiffies.h>
#include "dpu_kms.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
#include "dpu_hw_cdm.h"
#include "dpu_encoder.h"
#define DPU_ENCODER_NAME_MAX 16
/*
* Wait for at most 2 vsyncs at the lowest supported refresh rate (24 Hz):
* 2 * 1000 ms / 24 = ~83.3 ms, rounded up to 84 ms.
*/
#define KICKOFF_TIMEOUT_MS 84
#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
/**
* enum dpu_enc_split_role - Role this physical encoder will play in a
* split-panel configuration, where one panel is master, and others slaves.
* Masters have extra responsibilities, like managing the VBLANK IRQ.
* @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
* @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
* @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
*/
enum dpu_enc_split_role {
ENC_ROLE_SOLO,
ENC_ROLE_MASTER,
ENC_ROLE_SLAVE,
};
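/*
* Example (illustrative): a single-DSI panel uses one physical encoder in
* ENC_ROLE_SOLO, while a dual-DSI split panel uses two physical encoders,
* one ENC_ROLE_MASTER (which, among other duties, owns the VBLANK IRQ)
* and one ENC_ROLE_SLAVE.
*/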
/**
* enum dpu_enc_enable_state - current enabled state of the physical encoder
* @DPU_ENC_DISABLING: Encoder transitioning to disable state
* Events bounding transition are encoder type specific
* @DPU_ENC_DISABLED: Encoder is disabled
* @DPU_ENC_ENABLING: Encoder transitioning to enabled
* Events bounding transition are encoder type specific
* @DPU_ENC_ENABLED: Encoder is enabled
* @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
* to recover from a previous error
*/
enum dpu_enc_enable_state {
DPU_ENC_DISABLING,
DPU_ENC_DISABLED,
DPU_ENC_ENABLING,
DPU_ENC_ENABLED,
DPU_ENC_ERR_NEEDS_HW_RESET
};
struct dpu_encoder_phys;
/**
* struct dpu_encoder_virt_ops - Interface the containing virtual encoder
* provides for the physical encoders to use to callback.
* @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
* Note: This is called from IRQ handler context.
* @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
* Note: This is called from IRQ handler context.
* @handle_frame_done: Notify virtual encoder that this phys encoder
* has completed the last requested frame.
*/
struct dpu_encoder_virt_ops {
void (*handle_vblank_virt)(struct drm_encoder *,
struct dpu_encoder_phys *phys);
void (*handle_underrun_virt)(struct drm_encoder *,
struct dpu_encoder_phys *phys);
void (*handle_frame_done)(struct drm_encoder *,
struct dpu_encoder_phys *phys, u32 event);
};
/**
* struct dpu_encoder_phys_ops - Interface the physical encoders provide to
* the containing virtual encoder.
* @late_register: DRM Call. Add Userspace interfaces, debugfs.
* @prepare_commit: MSM Atomic Call, start of atomic commit sequence
* @is_master: Whether this phys_enc is the current master
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
* @mode_fixup: DRM Call. Fixup a DRM mode.
* @mode_set: DRM Call. Set a DRM mode.
* This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @atomic_check: DRM Call. Atomic check new DRM state.
* @destroy: DRM Call. Destroy and release resources.
* @get_hw_resources: Populate the structure with the hardware
* resources that this phys_enc is using.
* Expect no overlap between phys_encs.
* @control_vblank_irq: Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
* @wait_for_tx_complete: Wait for hardware to transfer the pixels
* to the panel
* @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
* @prepare_for_kickoff: Do any work necessary prior to a kickoff
* For CMD encoder, may wait for previous tx done
* @handle_post_kickoff: Do any work necessary post-kickoff
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
* @setup_misr: Sets up MISR, enables and disables based on sysfs
* @collect_misr: Collects MISR data on frame update
* @hw_reset: Issue HW recovery such as CTL reset and clear
* DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
* @restore: Restore all the encoder configs.
* @get_line_count: Obtain current vertical line count
*/
struct dpu_encoder_phys_ops {
int (*late_register)(struct dpu_encoder_phys *encoder,
struct dentry *debugfs_root);
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
bool (*is_master)(struct dpu_encoder_phys *encoder);
bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct dpu_encoder_phys *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*enable)(struct dpu_encoder_phys *encoder);
void (*disable)(struct dpu_encoder_phys *encoder);
int (*atomic_check)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_kickoff_params *params);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
bool enable, u32 frame_count);
u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
};
/**
* enum dpu_intr_idx - dpu encoder interrupt index
* @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
* @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
* @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
* @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
* @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
*/
enum dpu_intr_idx {
INTR_IDX_VSYNC,
INTR_IDX_PINGPONG,
INTR_IDX_UNDERRUN,
INTR_IDX_CTL_START,
INTR_IDX_RDPTR,
INTR_IDX_MAX,
};
/**
* dpu_encoder_irq - tracking structure for interrupts
* @name: string name of interrupt
* @intr_type: Encoder interrupt type
* @intr_idx: Encoder interrupt enumeration
* @hw_idx: HW Block ID
* @irq_idx: IRQ interface lookup index from DPU IRQ framework
* will be -EINVAL if IRQ is not registered
* @cb: interrupt callback
*/
struct dpu_encoder_irq {
const char *name;
enum dpu_intr_type intr_type;
enum dpu_intr_idx intr_idx;
int hw_idx;
int irq_idx;
struct dpu_irq_callback cb;
};
/**
* struct dpu_encoder_phys - physical encoder that drives a single INTF block
* tied to a specific panel / sub-panel. Abstract type, sub-classed by
* phys_vid or phys_cmd for video mode or command mode encs respectively.
* @parent: Pointer to the containing virtual encoder
* @connector: If a mode is set, cached pointer to the active connector
* @ops: Operations exposed to the virtual encoder
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
* @hw_cdm: Hardware interface to the cdm registers
* @cdm_cfg: Chroma-down hardware configuration
* @hw_pp: Hardware interface to the ping pong registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
* @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
* @vsync_cnt: Vsync count for the physical encoder
* @underrun_cnt: Underrun count for the physical encoder
* @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
* vs. the number of done/vblank irqs. Should hover
* between 0-2. Incremented when a new kickoff is
* scheduled. Decremented in irq handler
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
* @irq: IRQ tracking structures
*/
struct dpu_encoder_phys {
struct drm_encoder *parent;
struct drm_connector *connector;
struct dpu_encoder_phys_ops ops;
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_cdm *hw_cdm;
struct dpu_hw_cdm_cfg cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
atomic_t vsync_cnt;
atomic_t underrun_cnt;
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
struct dpu_encoder_irq irq[INTR_IDX_MAX];
};
static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
{
atomic_inc_return(&phys->pending_ctlstart_cnt);
return atomic_inc_return(&phys->pending_kickoff_cnt);
}
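/*
* Illustrative pairing (see the cmd/vid implementations below): the
* kickoff path calls dpu_encoder_phys_inc_pending() before triggering the
* ctl start, and the pp_done/ctl_start IRQ handlers decrement the
* counters and wake pending_kickoff_wq, so waiters block until the counts
* drain back to zero.
*/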
/**
* struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
* mode specific operations
* @base: Baseclass physical encoder structure
* @hw_intf: Hardware interface to the intf registers
* @timing_params: Current timing parameter
*/
struct dpu_encoder_phys_vid {
struct dpu_encoder_phys base;
struct dpu_hw_intf *hw_intf;
struct intf_timing_params timing_params;
};
/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
* @stream_sel: Stream selection for multi-stream interfaces
* @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
* after ctl_start instead of before next frame kickoff
* @pp_timeout_report_cnt: number of pingpong done irq timeout errors
* @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
* @pending_vblank_wq: Wait queue for blocking until VBLANK received
*/
struct dpu_encoder_phys_cmd {
struct dpu_encoder_phys base;
int stream_sel;
bool serialize_wait4pp;
int pp_timeout_report_cnt;
atomic_t pending_vblank_cnt;
wait_queue_head_t pending_vblank_wq;
};
/**
* struct dpu_enc_phys_init_params - initialization parameters for phys encs
* @dpu_kms: Pointer to the dpu_kms top level
* @parent: Pointer to the containing virtual encoder
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @split_role: Role to play in a split-panel configuration
* @intf_idx: Interface index this phys_enc will control
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
*/
struct dpu_enc_phys_init_params {
struct dpu_kms *dpu_kms;
struct drm_encoder *parent;
const struct dpu_encoder_virt_ops *parent_ops;
enum dpu_enc_split_role split_role;
enum dpu_intf intf_idx;
spinlock_t *enc_spinlock;
};
/**
* dpu_encoder_wait_info - container for passing arguments to irq wait functions
* @wq: wait queue structure
* @atomic_cnt: wait until atomic_cnt equals zero
* @timeout_ms: timeout value in milliseconds
*/
struct dpu_encoder_wait_info {
wait_queue_head_t *wq;
atomic_t *atomic_cnt;
s64 timeout_ms;
};
/**
* dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p);
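/*
* Minimal usage sketch (illustrative only, error handling elided): the
* virtual encoder fills the init params and constructs one physical
* encoder per interface, e.g.:
*
*	struct dpu_enc_phys_init_params params = {
*		.dpu_kms = dpu_kms,
*		.parent = drm_enc,
*		.parent_ops = &dpu_encoder_parent_ops,
*		.split_role = ENC_ROLE_SOLO,
*		.intf_idx = INTF_1,
*		.enc_spinlock = &dpu_enc->enc_spinlock,
*	};
*	struct dpu_encoder_phys *phys = dpu_encoder_phys_cmd_init(&params);
*
*	if (IS_ERR(phys))
*		return PTR_ERR(phys);
*
* "dpu_encoder_parent_ops" and "dpu_enc" are assumed names belonging to
* the containing virtual encoder, not definitions from this header.
*/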
/**
* dpu_encoder_helper_trigger_start - control start helper function
* This helper function may be optionally specified by physical
* encoders if they require ctl_start triggering.
* @phys_enc: Pointer to physical encoder structure
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
/**
* dpu_encoder_helper_wait_event_timeout - wait for event with timeout
* taking into account that jiffies may jump between reads leading to
* incorrectly detected timeouts. Prevent failure in this scenario by
* making sure that elapsed time during wait is valid.
* @drm_id: drm object id for logging
* @hw_id: hw instance id for logging
* @info: wait info structure
*/
int dpu_encoder_helper_wait_event_timeout(
int32_t drm_id,
int32_t hw_id,
struct dpu_encoder_wait_info *info);
/**
* dpu_encoder_helper_hw_reset - issue ctl hw reset
* This helper function may be optionally specified by physical
* encoders if they require ctl hw reset. If state is currently
* DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
* @phys_enc: Pointer to physical encoder structure
*/
void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
if (phys_enc->split_role == ENC_ROLE_SOLO &&
phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
}
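/*
* Example (illustrative): a solo encoder driving two layer mixers that
* are 3D-merged onto a single interface (topology
* DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE) returns BLEND_3D_H_ROW_INT, i.e. the
* two pipe outputs are merged row-interleaved; all other cases return
* BLEND_3D_NONE.
*/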
/**
* dpu_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
* the split display related registers.
* @phys_enc: Pointer to physical encoder structure
* @interface: enum dpu_intf setting
*/
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface);
/**
* dpu_encoder_helper_hw_release - prepare for h/w reset during disable
* @phys_enc: Pointer to physical encoder structure
* @fb: Optional fb for specifying new mixer output resolution, may be NULL
* Return: Zero on success
*/
int dpu_encoder_helper_hw_release(struct dpu_encoder_phys *phys_enc,
struct drm_framebuffer *fb);
/**
* dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
* timed out, including reporting frame error event to crtc and debug dump
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: Failing interrupt index
*/
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
/**
* dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
* note: will call dpu_encoder_helper_wait_event_timeout
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @wait_info: wait info struct
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx,
struct dpu_encoder_wait_info *wait_info);
/**
* dpu_encoder_helper_register_irq - register and enable an irq
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
/**
* dpu_encoder_helper_unregister_irq - unregister and disable an irq
* @phys_enc: Pointer to physical encoder structure
* @intr_idx: encoder interrupt index
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
#endif /* __DPU_ENCODER_PHYS_H__ */

@@ -0,0 +1,905 @@
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
#define to_dpu_encoder_phys_cmd(x) \
container_of(x, struct dpu_encoder_phys_cmd, base)
#define PP_TIMEOUT_MAX_TRIALS 10
/*
* Tearcheck sync start and continue thresholds are empirically found
* based on common panels. In the future, we may want to allow panels to
* override these default values.
*/
#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
struct dpu_encoder_phys_cmd *cmd_enc)
{
return KICKOFF_TIMEOUT_MS;
}
static inline bool dpu_encoder_phys_cmd_is_master(
struct dpu_encoder_phys *phys_enc)
{
return phys_enc->split_role != ENC_ROLE_SLAVE;
}
static bool dpu_encoder_phys_cmd_mode_fixup(
struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (phys_enc)
DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
return true;
}
static void _dpu_encoder_phys_cmd_update_intf_cfg(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_ctl *ctl;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc)
return;
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.setup_intf_cfg)
return;
intf_cfg.intf = phys_enc->intf_idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
int new_cnt;
u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
if (!phys_enc || !phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("pp_done_irq");
/* notify all synchronous clients first, then asynchronous clients */
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
phys_enc, event);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
new_cnt, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
DPU_ATRACE_END("pp_done_irq");
}
static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc || !phys_enc->hw_pp)
return;
DPU_ATRACE_BEGIN("rd_ptr_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
if (phys_enc->parent_ops->handle_vblank_virt)
phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
phys_enc);
atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
wake_up_all(&cmd_enc->pending_vblank_wq);
DPU_ATRACE_END("rd_ptr_irq");
}
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc || !phys_enc->hw_ctl)
return;
DPU_ATRACE_BEGIN("ctl_start_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
/* Signal any waiting ctl start interrupt */
wake_up_all(&phys_enc->pending_kickoff_wq);
DPU_ATRACE_END("ctl_start_irq");
}
static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
if (!phys_enc)
return;
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
}
static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_irq *irq;
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->hw_idx = phys_enc->hw_ctl->idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->hw_idx = phys_enc->hw_pp->idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->hw_idx = phys_enc->hw_pp->idx;
irq->irq_idx = -EINVAL;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->hw_idx = phys_enc->intf_idx;
irq->irq_idx = -EINVAL;
}
static void dpu_encoder_phys_cmd_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
struct dpu_rm_hw_iter iter;
int i, instance;
if (!phys_enc || !mode || !adj_mode) {
DPU_ERROR("invalid args\n");
return;
}
phys_enc->cached_mode = *adj_mode;
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
/* Retrieve previously allocated HW Resources. Shouldn't fail */
dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
for (i = 0; i <= instance; i++) {
if (dpu_rm_get_hw(rm, &iter))
phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
}
if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
PTR_ERR(phys_enc->hw_ctl));
phys_enc->hw_ctl = NULL;
return;
}
_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
bool do_log = false;
if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
do_log = true;
} else if (cmd_enc->pp_timeout_report_cnt == 1) {
do_log = true;
}
trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt),
frame_event);
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->hw_ctl->idx - CTL_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
/* request a ctl reset before the next kickoff */
phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
phys_enc->parent, phys_enc, frame_event);
return -ETIMEDOUT;
}
static int _dpu_encoder_phys_cmd_wait_for_idle(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
&wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
else if (!ret)
cmd_enc->pp_timeout_report_cnt = 0;
return ret;
}
static int dpu_encoder_phys_cmd_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
{
int ret = 0;
int refcount;
if (!phys_enc || !phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
goto end;
/* protect against negative */
if (!enable && refcount == 0) {
ret = -EINVAL;
goto end;
}
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable ? "true" : "false", refcount);
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
end:
if (ret) {
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
enable ? "true" : "false", refcount);
}
return ret;
}
void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
enable, atomic_read(&phys_enc->vblank_refcount));
if (enable) {
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_encoder_helper_register_irq(phys_enc,
INTR_IDX_CTL_START);
} else {
if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_CTL_START);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
}
}
static void dpu_encoder_phys_cmd_tearcheck_config(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_hw_tear_check tc_cfg = { 0 };
struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
if (!phys_enc || !phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
mode = &phys_enc->cached_mode;
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
if (!phys_enc->hw_pp->ops.setup_tearcheck ||
!phys_enc->hw_pp->ops.enable_tearcheck) {
DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
return;
}
dpu_kms = phys_enc->dpu_kms;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
DPU_ERROR("invalid device\n");
return;
}
priv = dpu_kms->dev->dev_private;
/*
* TE default: dsi byte clock calculated based on 70 fps;
* around 14 ms to complete a kickoff cycle if te disabled;
* vclk_line based on 60 fps; write is faster than read;
* init == start == rdptr;
*
* vsync_count is the ratio of the MDP VSYNC clock frequency to the
* LCD panel refresh rate, divided by the number of rows (lines) in
* the LCD panel.
*/
vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync_clk");
if (!vsync_hz) {
DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
vsync_hz);
return;
}
tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
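/*
* Worked example (illustrative numbers): with a 19.2 MHz vsync clock,
* vtotal = 2500 and vrefresh = 60, vsync_count = 19200000 / (2500 * 60)
* = 128 vsync clock ticks per panel line.
*/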
/* enable external TE after kickoff to avoid premature autorefresh */
tc_cfg.hw_vsync_mode = 0;
/*
* By setting sync_cfg_height to near max register value, we essentially
* disable dpu hw generated TE signal, since hw TE will arrive first.
* Only caveat is if due to error, we hit wrap-around.
*/
tc_cfg.sync_cfg_height = 0xFFF0;
tc_cfg.vsync_init_val = mode->vdisplay;
tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
tc_cfg.start_pos = mode->vdisplay;
tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
mode->vtotal, mode->vrefresh);
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
tc_cfg.rd_ptr_irq);
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
tc_cfg.vsync_count, tc_cfg.vsync_init_val);
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
}
static void _dpu_encoder_phys_cmd_pingpong_config(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
phys_enc->hw_pp->idx - PINGPONG_0);
drm_mode_debug_printmodeline(&phys_enc->cached_mode);
_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}
static bool dpu_encoder_phys_cmd_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
/*
* we do separate flush for each CTL and let
* CTL_START synchronize them
*/
return false;
}
static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
goto skip_flush;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_pp) {
DPU_ERROR("invalid phys encoder\n");
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
if (phys_enc->enable_state == DPU_ENC_ENABLED) {
DPU_ERROR("already enabled\n");
return;
}
dpu_encoder_phys_cmd_enable_helper(phys_enc);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
static void _dpu_encoder_phys_cmd_connect_te(
struct dpu_encoder_phys *phys_enc, bool enable)
{
if (!phys_enc || !phys_enc->hw_pp ||
!phys_enc->hw_pp->ops.connect_external_te)
return;
trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}
static void dpu_encoder_phys_cmd_prepare_idle_pc(
struct dpu_encoder_phys *phys_enc)
{
_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}
static int dpu_encoder_phys_cmd_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_pingpong *hw_pp;
if (!phys_enc || !phys_enc->hw_pp)
return -EINVAL;
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return -EINVAL;
hw_pp = phys_enc->hw_pp;
if (!hw_pp->ops.get_line_count)
return -EINVAL;
return hw_pp->ops.get_line_count(hw_pp);
}
static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc || !phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->enable_state);
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
return;
}
if (phys_enc->hw_pp->ops.enable_tearcheck)
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
phys_enc->enable_state = DPU_ENC_DISABLED;
}
static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
kfree(cmd_enc);
}
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
return;
}
DPU_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_kickoff_params *params)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
int ret;
if (!phys_enc || !phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
return;
}
DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(&phys_enc->pending_kickoff_cnt));
/*
* Mark kickoff request as outstanding. If there is more than one
* outstanding, then we have to wait for the previous one to complete
*/
ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (ret) {
/* force pending_kickoff_cnt 0 to discard failed kickoff */
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
DRMID(phys_enc->parent), ret,
phys_enc->hw_pp->idx - PINGPONG_0);
}
DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(&phys_enc->pending_kickoff_cnt));
}
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc || !phys_enc->hw_ctl) {
DPU_ERROR("invalid argument(s)\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
&wait_info);
if (ret == -ETIMEDOUT) {
DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
ret = -EINVAL;
}
return ret;
}
static int dpu_encoder_phys_cmd_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
int rc;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
if (rc) {
DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
DRMID(phys_enc->parent), rc,
phys_enc->intf_idx - INTF_0);
}
return rc;
}
static int dpu_encoder_phys_cmd_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
if (dpu_encoder_phys_cmd_is_master(phys_enc))
rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
/* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp)
dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
return rc;
}
static int dpu_encoder_phys_cmd_wait_for_vblank(
struct dpu_encoder_phys *phys_enc)
{
int rc = 0;
struct dpu_encoder_phys_cmd *cmd_enc;
struct dpu_encoder_wait_info wait_info;
if (!phys_enc)
return -EINVAL;
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
/* only required for master controller */
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
return rc;
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
atomic_inc(&cmd_enc->pending_vblank_cnt);
rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
&wait_info);
return rc;
}
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
/*
* re-enable external TE, either for the first time after enabling
* or if disabled for Autorefresh
*/
_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}
static void dpu_encoder_phys_cmd_trigger_start(
struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return;
dpu_encoder_helper_trigger_start(phys_enc);
}
static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->mode_set = dpu_encoder_phys_cmd_mode_set;
ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
goto fail;
}
phys_enc = &cmd_enc->base;
hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
if (IS_ERR_OR_NULL(hw_mdp)) {
/* dpu_rm_get_mdp() may return NULL, which PTR_ERR() maps to 0 */
ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
DPU_ERROR("failed to get mdptop\n");
goto fail_mdp_init;
}
phys_enc->hw_mdptop = hw_mdp;
phys_enc->intf_idx = p->intf_idx;
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_CMD;
phys_enc->enc_spinlock = p->enc_spinlock;
cmd_enc->stream_sel = 0;
phys_enc->enable_state = DPU_ENC_DISABLED;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->hw_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->name = "ctl_start";
irq->intr_type = DPU_IRQ_TYPE_CTL_START;
irq->intr_idx = INTR_IDX_CTL_START;
irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
irq = &phys_enc->irq[INTR_IDX_PINGPONG];
irq->name = "pp_done";
irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
irq->intr_idx = INTR_IDX_PINGPONG;
irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
irq = &phys_enc->irq[INTR_IDX_RDPTR];
irq->name = "pp_rd_ptr";
irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
irq->intr_idx = INTR_IDX_RDPTR;
irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
DPU_DEBUG_CMDENC(cmd_enc, "created\n");
return phys_enc;
fail_mdp_init:
kfree(cmd_enc);
fail:
return ERR_PTR(ret);
}

@@ -0,0 +1,922 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define to_dpu_encoder_phys_vid(x) \
container_of(x, struct dpu_encoder_phys_vid, base)
static bool dpu_encoder_phys_vid_is_master(
struct dpu_encoder_phys *phys_enc)
{
return phys_enc->split_role != ENC_ROLE_SLAVE;
}
static void drm_mode_to_intf_timing_params(
const struct dpu_encoder_phys_vid *vid_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
if ((mode->htotal < mode->hsync_end)
|| (mode->hsync_start < mode->hdisplay)
|| (mode->vtotal < mode->vsync_end)
|| (mode->vsync_start < mode->vdisplay)
|| (mode->hsync_end < mode->hsync_start)
|| (mode->vsync_end < mode->vsync_start)) {
DPU_ERROR(
"invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
mode->hsync_start, mode->hsync_end,
mode->htotal, mode->hdisplay);
DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
mode->vsync_start, mode->vsync_end,
mode->vtotal, mode->vdisplay);
return;
}
/*
* https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
* Active Region Front Porch Sync Back Porch
* <-----------------><------------><-----><----------->
* <- [hv]display --->
* <--------- [hv]sync_start ------>
* <----------------- [hv]sync_end ------->
* <---------------------------- [hv]total ------------->
*/
timing->width = mode->hdisplay; /* active width */
timing->height = mode->vdisplay; /* active height */
timing->xres = timing->width;
timing->yres = timing->height;
timing->h_back_porch = mode->htotal - mode->hsync_end;
timing->h_front_porch = mode->hsync_start - mode->hdisplay;
timing->v_back_porch = mode->vtotal - mode->vsync_end;
timing->v_front_porch = mode->vsync_start - mode->vdisplay;
timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
timing->border_clr = 0;
timing->underflow_clr = 0xff;
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
if (vid_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
/*
* if (vid_enc->hw->cap->type == INTF_EDP) {
* display_v_start += mode->htotal - mode->hsync_start;
* display_v_end -= mode->hsync_start - mode->hdisplay;
* }
*/
}
static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
timing->h_back_porch + timing->h_front_porch +
timing->hsync_pulse_width;
return active + inactive;
}
static inline u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
timing->v_back_porch + timing->v_front_porch +
timing->vsync_pulse_width;
return active + inactive;
}
/*
* programmable_fetch_get_num_lines:
* Number of fetch lines in vertical front porch
* @timing: Pointer to the intf timing information for the requested mode
*
* Returns the number of fetch lines in vertical front porch at which mdp
* can start fetching the next frame.
*
* Number of needed prefetch lines is anything that cannot be absorbed in the
* start of frame time (back porch + vsync pulse width).
*
* Some panels have a very large VFP; however, we only need a total number
* of lines based on the chip's worst-case latencies.
*/
static u32 programmable_fetch_get_num_lines(
struct dpu_encoder_phys_vid *vid_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
u32 actual_vfp_lines = 0;
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
DPU_DEBUG_VIDENC(vid_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
DPU_DEBUG_VIDENC(vid_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
DPU_DEBUG_VIDENC(vid_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
DPU_DEBUG_VIDENC(vid_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
return actual_vfp_lines;
}
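/*
* Worked example (illustrative numbers): if the chip worst case needs 25
* prefetch lines and the mode has v_back_porch + vsync_pulse_width = 14,
* then 25 - 14 = 11 lines must be fetched in the vertical front porch;
* with v_front_porch >= 11 this returns 11, otherwise it falls back to
* the entire (too small) front porch and warns once.
*/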
/*
* programmable_fetch_config: Programs HW to prefetch lines by offsetting
* the start of fetch into the vertical front porch for cases where the
* vsync pulse width and vertical back porch time is insufficient
*
* Gets # of lines to pre-fetch, then calculate VSYNC counter value.
* HW layer requires VSYNC counter of first pixel of tgt VFP line.
*
* @timing: Pointer to the intf timing information for the requested mode
*/
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
struct dpu_encoder_phys_vid *vid_enc =
to_dpu_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
u32 vert_total = 0;
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
return;
vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
vfp_fetch_start_vsync_counter =
(vert_total - vfp_fetch_lines) * horiz_total + 1;
f.enable = 1;
f.fetch_start = vfp_fetch_start_vsync_counter;
}
DPU_DEBUG_VIDENC(vid_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
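/*
 * Worked example (hypothetical panel timings, values for illustration):
 *   prog_fetch_lines_worst_case = 24 (the sdm845 catalog value)
 *   v_back_porch = 8, vsync_pulse_width = 4 -> start_of_frame_lines = 12
 *   12 < 24, so needed_vfp_lines = 24 - 12 = 12
 *   v_front_porch = 20 >= 12 -> vfp_fetch_lines = 12
 *   with vert_total = 1125 and horiz_total = 2200:
 *     fetch_start = (1125 - 12) * 2200 + 1 = 2448601
 *   i.e. fetch begins 12 lines before the end of the frame, in
 *   pixel-clock units.
 */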
static bool dpu_encoder_phys_vid_mode_fixup(
struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
if (phys_enc)
DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
/*
 * Leave the mode untouched: modifying it here has consequences
 * when the adjusted mode comes back to us in the enable path.
 */
return true;
}
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
struct drm_display_mode mode;
struct intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
u32 fmt_fourcc = DRM_FORMAT_RGB888;
unsigned long lock_flags;
struct dpu_hw_intf_cfg intf_cfg = { 0 };
if (!phys_enc || !phys_enc->hw_ctl ||
!phys_enc->hw_ctl->ops.setup_intf_cfg) {
DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
return;
}
mode = phys_enc->cached_mode;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (!vid_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
mode.hdisplay >>= 1;
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
DPU_DEBUG_VIDENC(vid_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
intf_cfg.intf = vid_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
vid_enc->timing_params = timing_params;
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
struct dpu_hw_ctl *hw_ctl;
unsigned long lock_flags;
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
if (!phys_enc)
return;
hw_ctl = phys_enc->hw_ctl;
if (!hw_ctl)
return;
DPU_ATRACE_BEGIN("vblank_irq");
if (phys_enc->parent_ops->handle_vblank_virt)
phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
phys_enc);
old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
/*
* only decrement the pending flush count if we've actually flushed
* hardware. due to sw irq latency, vblank may have already happened
* so we need to double-check with hw that it accepted the flush bits
*/
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (flush_register == 0)
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
DPU_ATRACE_END("vblank_irq");
}
static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
if (!phys_enc)
return;
if (phys_enc->parent_ops->handle_underrun_virt)
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
phys_enc);
}
static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
{
if (!phys_enc)
return false;
if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
return true;
return false;
}
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
}
static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_irq *irq;
/*
* Initialize irq->hw_idx only when irq is not registered.
* Prevent invalidating irq->irq_idx as modeset may be
* called many times during dfps.
*/
irq = &phys_enc->irq[INTR_IDX_VSYNC];
if (irq->irq_idx < 0)
irq->hw_idx = phys_enc->intf_idx;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
if (irq->irq_idx < 0)
irq->hw_idx = phys_enc->intf_idx;
}
static void dpu_encoder_phys_vid_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
struct dpu_rm *rm;
struct dpu_rm_hw_iter iter;
int i, instance;
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->dpu_kms) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
rm = &phys_enc->dpu_kms->rm;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
}
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
/* Retrieve previously allocated HW Resources. Shouldn't fail */
dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
for (i = 0; i <= instance; i++) {
if (dpu_rm_get_hw(rm, &iter))
phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
}
if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
PTR_ERR(phys_enc->hw_ctl));
phys_enc->hw_ctl = NULL;
return;
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
}
static int dpu_encoder_phys_vid_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
{
int ret = 0;
struct dpu_encoder_phys_vid *vid_enc;
int refcount;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return -EINVAL;
}
refcount = atomic_read(&phys_enc->vblank_refcount);
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
goto end;
/* protect against negative */
if (!enable && refcount == 0) {
ret = -EINVAL;
goto end;
}
DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
ret = dpu_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_VSYNC);
end:
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
}
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_intf *intf;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
!phys_enc->parent->dev->dev_private) {
DPU_ERROR("invalid encoder/device\n");
return;
}
priv = phys_enc->parent->dev->dev_private;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
intf = vid_enc->hw_intf;
ctl = phys_enc->hw_ctl;
if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
return;
dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
/*
* For single flush cases (dual-ctl or pp-split), skip setting the
* flush bit for the slave intf, since both intfs use same ctl
* and HW will only flush the master.
*/
if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
ctl->idx - CTL_0, flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
phys_enc->enable_state = DPU_ENC_ENABLING;
}
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
DPU_DEBUG_VIDENC(vid_enc, "\n");
kfree(vid_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_hw_resources *hw_res,
struct drm_connector_state *conn_state)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !hw_res) {
DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
phys_enc != 0, hw_res != 0, conn_state != 0);
return;
}
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (!vid_enc->hw_intf) {
DPU_ERROR("invalid arg(s), hw_intf\n");
return;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
}
static int _dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_phys *phys_enc, bool notify)
{
struct dpu_encoder_wait_info wait_info;
int ret;
if (!phys_enc) {
pr_err("invalid encoder\n");
return -EINVAL;
}
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
if (notify && phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_DONE);
return 0;
}
/* Wait for kickoff to complete */
ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
&wait_info);
if (ret == -ETIMEDOUT) {
dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
} else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_DONE);
return ret;
}
static int dpu_encoder_phys_vid_wait_for_vblank(
struct dpu_encoder_phys *phys_enc)
{
return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_kickoff_params *params)
{
struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
if (!phys_enc || !params) {
DPU_ERROR("invalid encoder/parameters\n");
return;
}
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
return;
/*
* hw supports hardware initiated ctl reset, so before we kickoff a new
* frame, need to check and wait for hw initiated ctl reset completion
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
dpu_dbg_dump(false, __func__, true, true);
}
}
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
unsigned long lock_flags;
int ret;
if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
!phys_enc->parent->dev->dev_private) {
DPU_ERROR("invalid encoder/device\n");
return;
}
priv = phys_enc->parent->dev->dev_private;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DPU_ERROR("already disabled\n");
return;
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/*
 * Wait for a vsync so we know the ENABLE=0 latched before
 * the (connector) source of the vsync's gets disabled,
 * otherwise we end up in a funny state if we re-enable
 * before the disable latches, which would result in some of
 * the settings for the new modeset (like the new scanout
 * buffer) not latching properly.
 */
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0, ret);
}
}
phys_enc->enable_state = DPU_ENC_DISABLED;
}
static void dpu_encoder_phys_vid_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
/*
* Video mode must flush CTL before enabling timing engine
* Video encoders need to turn on their interfaces now
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
}
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
struct dpu_encoder_phys_vid *vid_enc;
int ret;
if (!phys_enc)
return;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
vid_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
if (enable) {
ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
if (ret)
return;
dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
} else {
dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
}
}
static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
bool enable, u32 frame_count)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc)
return;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
enable, frame_count);
}
static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc)
return 0;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
}
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
return -EINVAL;
return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
}
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
ops->mode_set = dpu_encoder_phys_vid_mode_set;
ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->destroy = dpu_encoder_phys_vid_destroy;
ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_vid *vid_enc = NULL;
struct dpu_rm_hw_iter iter;
struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
if (!p) {
ret = -EINVAL;
goto fail;
}
vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
if (!vid_enc) {
ret = -ENOMEM;
goto fail;
}
phys_enc = &vid_enc->base;
hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
if (IS_ERR_OR_NULL(hw_mdp)) {
ret = PTR_ERR(hw_mdp);
DPU_ERROR("failed to get mdptop\n");
goto fail;
}
phys_enc->hw_mdptop = hw_mdp;
phys_enc->intf_idx = p->intf_idx;
/**
* hw_intf resource permanently assigned to this encoder
* Other resources allocated at atomic commit time by use case
*/
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
if (hw_intf->idx == p->intf_idx) {
vid_enc->hw_intf = hw_intf;
break;
}
}
if (!vid_enc->hw_intf) {
ret = -EINVAL;
DPU_ERROR("failed to get hw_intf\n");
goto fail;
}
DPU_DEBUG_VIDENC(vid_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_VIDEO;
phys_enc->enc_spinlock = p->enc_spinlock;
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->hw_idx = -EINVAL;
irq->cb.arg = phys_enc;
}
irq = &phys_enc->irq[INTR_IDX_VSYNC];
irq->name = "vsync_irq";
irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
irq->intr_idx = INTR_IDX_VSYNC;
irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->name = "underrun";
irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
fail:
DPU_ERROR("failed to create encoder\n");
if (vid_enc)
dpu_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
}
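As a usage sketch (not part of the patch), a caller would construct the init params roughly as below; the local variables and the parent_ops table are placeholders for whatever the parent virtual encoder supplies:

struct dpu_enc_phys_init_params params = {
	.dpu_kms      = dpu_kms,	/* driver-wide kms handle */
	.parent       = drm_enc,	/* parent virtual drm encoder */
	.parent_ops   = &parent_ops,	/* callbacks back into the parent */
	.split_role   = ENC_ROLE_SOLO,
	.intf_idx     = INTF_1,		/* e.g. the first DSI interface */
	.enc_spinlock = &enc_spinlock,
};
struct dpu_encoder_phys *phys = dpu_encoder_phys_vid_init(&params);

if (IS_ERR(phys))
	return PTR_ERR(phys);
/* all further control flows through the ops table filled in above */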

[File diff suppressed because it is too large]

dpu_formats.h

@@ -0,0 +1,136 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_FORMATS_H
#define _DPU_FORMATS_H
#include <drm/drm_fourcc.h>
#include "msm_gem.h"
#include "dpu_hw_mdss.h"
/**
* dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
* @format: DRM FourCC Code
* @modifier: data layout modifier from client
*/
const struct dpu_format *dpu_get_dpu_format_ext(
const uint32_t format,
const uint64_t modifier);
#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
/**
* dpu_get_msm_format - get a dpu_format by its msm_format base
* callback function registered with the msm_kms layer
* @kms: kms driver
* @format: DRM FourCC Code
* @modifiers: data layout modifier
*/
const struct msm_format *dpu_get_msm_format(
struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
/**
* dpu_populate_formats - populate the given array with fourcc codes supported
* @format_list: pointer to list of possible formats
* @pixel_formats: array to populate with fourcc codes
* @pixel_modifiers: array to populate with drm modifiers, can be NULL
* @pixel_formats_max: length of pixel formats array
* Return: number of elements populated
*/
uint32_t dpu_populate_formats(
const struct dpu_format_extended *format_list,
uint32_t *pixel_formats,
uint64_t *pixel_modifiers,
uint32_t pixel_formats_max);
/**
* dpu_format_get_plane_sizes - calculate size and layout of given buffer format
* @fmt: pointer to dpu_format
* @w: width of the buffer
* @h: height of the buffer
* @layout: layout of the buffer
* @pitches: array of size [DPU_MAX_PLANES] to populate
* pitch for each plane
*
* Return: size of the buffer
*/
int dpu_format_get_plane_sizes(
const struct dpu_format *fmt,
const uint32_t w,
const uint32_t h,
struct dpu_hw_fmt_layout *layout,
const uint32_t *pitches);
/**
* dpu_format_get_block_size - get block size of given format when
* operating in block mode
* @fmt: pointer to dpu_format
* @w: pointer to width of the block
* @h: pointer to height of the block
*
* Return: 0 if success; error code otherwise
*/
int dpu_format_get_block_size(const struct dpu_format *fmt,
uint32_t *w, uint32_t *h);
/**
* dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format
* @kms: kms driver
* @msm_fmt: pointer to the msm_fmt base pointer of an dpu_format
* @cmd: fb_cmd2 structure user request
* @bos: gem buffer object list
*
* Return: error code on failure, 0 on success
*/
int dpu_format_check_modified_format(
const struct msm_kms *kms,
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
/**
* dpu_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
* @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
* Return: error code on failure, -EAGAIN if success but the addresses
* are the same as before or 0 if new addresses were populated
*/
int dpu_format_populate_layout(
struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *fmtl);
/**
* dpu_format_get_framebuffer_size - get framebuffer memory size
* @format: DRM pixel format
* @width: pixel width
* @height: pixel height
* @pitches: array of size [DPU_MAX_PLANES] to populate
* pitch for each plane
* @modifiers: drm modifier
*
* Return: memory size required for frame buffer
*/
uint32_t dpu_format_get_framebuffer_size(
const uint32_t format,
const uint32_t width,
const uint32_t height,
const uint32_t *pitches,
const uint64_t modifiers);
#endif /*_DPU_FORMATS_H */

dpu_hw_blk.c

@@ -0,0 +1,155 @@
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include "dpu_hw_mdss.h"
#include "dpu_hw_blk.h"
/* Serialization lock for dpu_hw_blk_list */
static DEFINE_MUTEX(dpu_hw_blk_lock);
/* List of all hw block objects */
static LIST_HEAD(dpu_hw_blk_list);
/**
* dpu_hw_blk_init - initialize hw block object
* @hw_blk: pointer to hw block object to initialize
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
* return: 0 if success; error code otherwise
*/
int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
if (!hw_blk) {
pr_err("invalid parameters\n");
return -EINVAL;
}
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
atomic_set(&hw_blk->refcount, 0);
if (ops)
hw_blk->ops = *ops;
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
return 0;
}
/**
* dpu_hw_blk_destroy - destroy hw block object.
* @hw_blk: pointer to hw block object
* return: none
*/
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
{
if (!hw_blk) {
pr_err("invalid parameters\n");
return;
}
if (atomic_read(&hw_blk->refcount))
pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
hw_blk->id);
mutex_lock(&dpu_hw_blk_lock);
list_del(&hw_blk->list);
mutex_unlock(&dpu_hw_blk_lock);
}
/**
* dpu_hw_blk_get - get hw_blk from free pool
* @hw_blk: if specified, increment reference count only
* @type: if hw_blk is not specified, allocate the next available of this type
* @id: if specified (>= 0), allocate the given instance of the above type
* return: pointer to hw block object
*/
struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
{
struct dpu_hw_blk *curr;
int rc, refcount;
if (!hw_blk) {
mutex_lock(&dpu_hw_blk_lock);
list_for_each_entry(curr, &dpu_hw_blk_list, list) {
if ((curr->type != type) ||
(id >= 0 && curr->id != id) ||
(id < 0 &&
atomic_read(&curr->refcount)))
continue;
hw_blk = curr;
break;
}
mutex_unlock(&dpu_hw_blk_lock);
}
if (!hw_blk) {
pr_debug("no hw_blk:%d\n", type);
return NULL;
}
refcount = atomic_inc_return(&hw_blk->refcount);
if (refcount == 1 && hw_blk->ops.start) {
rc = hw_blk->ops.start(hw_blk);
if (rc) {
pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
goto error_start;
}
}
pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
hw_blk->id, refcount);
return hw_blk;
error_start:
dpu_hw_blk_put(hw_blk);
return ERR_PTR(rc);
}
/**
* dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
* @hw_blk: hw block to be freed
*/
void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
{
if (!hw_blk) {
pr_err("invalid parameters\n");
return;
}
pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
atomic_read(&hw_blk->refcount));
if (!atomic_read(&hw_blk->refcount)) {
pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
return;
}
if (atomic_dec_return(&hw_blk->refcount))
return;
if (hw_blk->ops.stop)
hw_blk->ops.stop(hw_blk);
}
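As a usage sketch (not part of the patch), the intended lifecycle of this refcounted base object is roughly the following; the wrapper type and start/stop callbacks are hypothetical:

struct my_ctl {
	struct dpu_hw_blk base;
	/* block-specific state lives here */
};

static int my_ctl_start(struct dpu_hw_blk *hw_blk)
{
	/* invoked on the first dpu_hw_blk_get(), e.g. to enable clocks */
	return 0;
}

static void my_ctl_stop(struct dpu_hw_blk *hw_blk)
{
	/* invoked on the last dpu_hw_blk_put() */
}

static struct dpu_hw_blk_ops my_ctl_ops = {
	.start = my_ctl_start,
	.stop = my_ctl_stop,
};

/* probe: register the instance on the global list */
dpu_hw_blk_init(&my_ctl->base, DPU_HW_BLK_CTL, CTL_0, &my_ctl_ops);

/* runtime: grab any free block of the type (id < 0), then release it */
struct dpu_hw_blk *blk = dpu_hw_blk_get(NULL, DPU_HW_BLK_CTL, -1);
if (!IS_ERR_OR_NULL(blk))
	dpu_hw_blk_put(blk);

/* remove: unregister once the refcount has dropped back to zero */
dpu_hw_blk_destroy(&my_ctl->base);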

dpu_hw_blk.h

@@ -0,0 +1,53 @@
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_BLK_H
#define _DPU_HW_BLK_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/atomic.h>
struct dpu_hw_blk;
/**
* struct dpu_hw_blk_ops - common hardware block operations
* @start: start operation on first get
* @stop: stop operation on last put
*/
struct dpu_hw_blk_ops {
int (*start)(struct dpu_hw_blk *);
void (*stop)(struct dpu_hw_blk *);
};
/**
* struct dpu_hw_blk - definition of hardware block object
* @list: list of hardware blocks
* @type: hardware block type
* @id: instance id
* @refcount: reference/usage count
*/
struct dpu_hw_blk {
struct list_head list;
u32 type;
int id;
atomic_t refcount;
struct dpu_hw_blk_ops ops;
};
int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
#endif /*_DPU_HW_BLK_H */

dpu_hw_catalog.c

@@ -0,0 +1,511 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "dpu_hw_mdss.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_catalog_format.h"
#include "dpu_kms.h"
#define VIG_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
#define PINGPONG_SDM845_SPLIT_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
#define MAX_HORZ_DECIMATION 4
#define MAX_VERT_DECIMATION 4
#define MAX_UPSCALE_RATIO 20
#define MAX_DOWNSCALE_RATIO 4
#define SSPP_UNITY_SCALE 1
#define STRCAT(X, Y) (X Y)
/*************************************************************
* DPU sub blocks config
*************************************************************/
/* DPU top level caps */
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
.qseed_type = DPU_SSPP_SCALER_QSEED3,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
.ubwc_version = DPU_HW_UBWC_VER_20,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
};
static struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x45C,
.features = 0,
.highest_bank_bit = 0x2,
.has_dest_scaler = true,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
.reg_off = 0x2B4, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG2] = {
.reg_off = 0x2BC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG3] = {
.reg_off = 0x2C4, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
.reg_off = 0x2AC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
.reg_off = 0x2B4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
.reg_off = 0x2BC, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
.reg_off = 0x2C4, .bit_off = 8},
},
};
/*************************************************************
* CTL sub blocks config
*************************************************************/
static struct dpu_ctl_cfg sdm845_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0xE4,
.features = BIT(DPU_CTL_SPLIT_DISPLAY)
},
{
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0xE4,
.features = BIT(DPU_CTL_SPLIT_DISPLAY)
},
{
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0xE4,
.features = 0
},
{
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0xE4,
.features = 0
},
{
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0xE4,
.features = 0
},
};
/*************************************************************
* SSPP sub blocks config
*************************************************************/
/* SSPP common configuration */
static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
.maxhdeciexp = MAX_HORZ_DECIMATION,
.maxvdeciexp = MAX_VERT_DECIMATION,
};
#define _VIG_SBLK(num, sdma_pri) \
{ \
.common = &sdm845_sspp_common, \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
.smart_dma_priority = sdma_pri, \
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
.id = DPU_SSPP_SCALER_QSEED3, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = STRCAT("sspp_csc", num), \
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.virt_format_list = plane_formats, \
}
#define _DMA_SBLK(num, sdma_pri) \
{ \
.common = &sdm845_sspp_common, \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
.smart_dma_priority = sdma_pri, \
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.format_list = plane_formats, \
.virt_format_list = plane_formats, \
}
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
.features = VIG_SDM845_MASK, \
.sblk = &_sblk, \
.xin_id = _xinid, \
.type = SSPP_TYPE_VIG, \
.clk_ctrl = _clkctrl \
}
#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
.features = DMA_SDM845_MASK, \
.sblk = &_sblk, \
.xin_id = _xinid, \
.type = SSPP_TYPE_DMA, \
.clk_ctrl = _clkctrl \
}
static struct dpu_sspp_cfg sdm845_sspp[] = {
SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
};
/*************************************************************
* MIXER sub blocks config
*************************************************************/
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
0xb0, 0xc8, 0xe0, 0xf8, 0x110
},
};
#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = MIXER_SDM845_MASK, \
.sblk = &sdm845_lm_sblk, \
.ds = _ds, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
static struct dpu_lm_cfg sdm845_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
};
/*************************************************************
* DS sub blocks config
*************************************************************/
static const struct dpu_ds_top_cfg sdm845_ds_top = {
.name = "ds_top_0", .id = DS_TOP,
.base = 0x60000, .len = 0xc,
.maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
.maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxupscale = MAX_UPSCALE_RATIO,
};
#define DS_BLK(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x800, \
.features = DPU_SSPP_SCALER_QSEED3, \
.top = &sdm845_ds_top \
}
static struct dpu_ds_cfg sdm845_ds[] = {
DS_BLK("ds_0", DS_0, 0x800),
DS_BLK("ds_1", DS_1, 0x1000),
};
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
.te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
.version = 0x1},
.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
#define PP_BLK_TE(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_SPLIT_MASK, \
.sblk = &sdm845_pp_sblk_te \
}
#define PP_BLK(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_MASK, \
.sblk = &sdm845_pp_sblk \
}
static struct dpu_pingpong_cfg sdm845_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
};
/*************************************************************
* INTF sub blocks config
*************************************************************/
#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x280, \
.type = _type, \
.controller_id = _ctrl_id, \
.prog_fetch_lines_worst_case = 24 \
}
static struct dpu_intf_cfg sdm845_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
};
/*************************************************************
* CDM sub blocks config
*************************************************************/
static struct dpu_cdm_cfg sdm845_cdm[] = {
{
.name = "cdm_0", .id = CDM_0,
.base = 0x79200, .len = 0x224,
.features = 0,
.intf_connect = BIT(INTF_3),
},
};
/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
static struct dpu_vbif_cfg sdm845_vbif[] = {
{
.name = "vbif_0", .id = VBIF_0,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
.qos_rt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
.priority_lvl = sdm845_rt_pri_lvl,
},
.qos_nrt_tbl = {
.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
.priority_lvl = sdm845_nrt_pri_lvl,
},
.memtype_count = 14,
.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
},
};
static struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
/*************************************************************
* PERF data config
*************************************************************/
/* SSPP QOS LUTs */
static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
{.fl = 4, .lut = 0x357},
{.fl = 5, .lut = 0x3357},
{.fl = 6, .lut = 0x23357},
{.fl = 7, .lut = 0x223357},
{.fl = 8, .lut = 0x2223357},
{.fl = 9, .lut = 0x22223357},
{.fl = 10, .lut = 0x222223357},
{.fl = 11, .lut = 0x2222223357},
{.fl = 12, .lut = 0x22222223357},
{.fl = 13, .lut = 0x222222223357},
{.fl = 14, .lut = 0x1222222223357},
{.fl = 0, .lut = 0x11222222223357}
};
static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
{.fl = 10, .lut = 0x344556677},
{.fl = 11, .lut = 0x3344556677},
{.fl = 12, .lut = 0x23344556677},
{.fl = 13, .lut = 0x223344556677},
{.fl = 14, .lut = 0x1223344556677},
{.fl = 0, .lut = 0x112233344556677},
};
static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
{.fl = 0, .lut = 0x0},
};
static struct dpu_perf_cfg sdm845_perf_data = {
.max_bw_low = 6800000,
.max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.core_ib_ff = "6.0",
.core_clk_ff = "1.0",
.comp_ratio_rt =
"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
.comp_ratio_nrt =
"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
.undersized_prefill_lines = 2,
.xtra_prefill_lines = 2,
.dest_scale_prefill_lines = 3,
.macrotile_prefill_lines = 4,
.yuv_nv12_prefill_lines = 8,
.linear_prefill_lines = 1,
.downscaling_prefill_lines = 1,
.amortizable_threshold = 25,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sdm845_qos_linear),
.entries = sdm845_qos_linear
},
{.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
.entries = sdm845_qos_macrotile
},
{.nentry = ARRAY_SIZE(sdm845_qos_nrt),
.entries = sdm845_qos_nrt
},
},
.cdp_cfg = {
{.rd_enable = 1, .wr_enable = 1},
{.rd_enable = 1, .wr_enable = 0}
},
};
/*************************************************************
* Hardware catalog init
*************************************************************/
/*
* sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
* and instance counts.
*/
void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
{
*dpu_cfg = (struct dpu_mdss_cfg){
.caps = &sdm845_dpu_caps,
.mdp_count = ARRAY_SIZE(sdm845_mdp),
.mdp = sdm845_mdp,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
.ds_count = ARRAY_SIZE(sdm845_ds),
.ds = sdm845_ds,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.cdm_count = ARRAY_SIZE(sdm845_cdm),
.cdm = sdm845_cdm,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.reg_dma_count = 1,
.dma_cfg = sdm845_regdma,
.perf = sdm845_perf_data,
};
}
static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
};
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
{
kfree(dpu_cfg);
}
struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
struct dpu_mdss_cfg *dpu_cfg;
dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
if (!dpu_cfg)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
if (cfg_handler[i].hw_rev == hw_rev) {
cfg_handler[i].cfg_init(dpu_cfg);
dpu_cfg->hwversion = hw_rev;
return dpu_cfg;
}
}
DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
dpu_hw_catalog_deinit(dpu_cfg);
return ERR_PTR(-ENODEV);
}
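As a usage sketch (not part of the patch), the catalog entry points are expected to be used along these lines:

struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_400); /* sdm845 v1.0 */

if (IS_ERR(cfg))
	return PTR_ERR(cfg);

/* the rest of the driver sizes itself from the per-SoC data */
pr_info("dpu: %u SSPPs, %u CTLs, max mixer width %u\n",
	cfg->sspp_count, cfg->ctl_count, cfg->caps->max_mixer_width);

dpu_hw_catalog_deinit(cfg);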

dpu_hw_catalog.h

@@ -0,0 +1,804 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_CATALOG_H
#define _DPU_HW_CATALOG_H
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <drm/drmP.h>
/**
* Maximum hardware block count: e.g. at most 12 SSPP pipes or
* 5 CTL paths. In all cases the current design supports at most
* 12 hardware blocks of any one type.
*/
#define MAX_BLOCKS 12
#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
((MINOR & 0xFFF) << 16) |\
(STEP & 0xFFFF))
#define DPU_HW_MAJOR(rev) ((rev) >> 28)
#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
(DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
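/*
 * Example: DPU_HW_VER(4, 0, 1) packs to 0x40000001 (sdm845 v2.0), so
 * DPU_HW_MAJOR() yields 4, DPU_HW_MINOR() yields 0 and DPU_HW_STEP()
 * yields 1; IS_SDM845_TARGET() is true because the major/minor pair
 * (0x4000) matches that of DPU_HW_VER_400.
 */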
#define DPU_HW_BLK_NAME_LEN 16
#define MAX_IMG_WIDTH 0x3fff
#define MAX_IMG_HEIGHT 0x3fff
#define CRTC_DUAL_MIXERS 2
#define MAX_XIN_COUNT 16
/**
* Supported UBWC feature versions
*/
enum {
DPU_HW_UBWC_VER_10 = 0x100,
DPU_HW_UBWC_VER_20 = 0x200,
DPU_HW_UBWC_VER_30 = 0x300,
};
#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
/**
* MDP TOP BLOCK features
* @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
* @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
* @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
* compression initial revision
* @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
* @DPU_MDP_MAX Maximum value
*/
enum {
DPU_MDP_PANIC_PER_PIPE = 0x1,
DPU_MDP_10BIT_SUPPORT,
DPU_MDP_BWC,
DPU_MDP_UBWC_1_0,
DPU_MDP_UBWC_1_5,
DPU_MDP_MAX
};
/**
* SSPP sub-blocks/features
* @DPU_SSPP_SRC Src and fetch part of the pipes,
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
* @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
* @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
* @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
* @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
* @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
* @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
* @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
* @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
* @DPU_SSPP_CDP Supports client driven prefetch
* @DPU_SSPP_MAX maximum value
*/
enum {
DPU_SSPP_SRC = 0x1,
DPU_SSPP_SCALER_QSEED2,
DPU_SSPP_SCALER_QSEED3,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
DPU_SSPP_CURSOR,
DPU_SSPP_QOS,
DPU_SSPP_QOS_8LVL,
DPU_SSPP_EXCL_RECT,
DPU_SSPP_SMART_DMA_V1,
DPU_SSPP_SMART_DMA_V2,
DPU_SSPP_TS_PREFILL,
DPU_SSPP_TS_PREFILL_REC1,
DPU_SSPP_CDP,
DPU_SSPP_MAX
};
/*
* MIXER sub-blocks/features
* @DPU_MIXER_LAYER Layer mixer layer blend configuration,
* @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
* @DPU_MIXER_GC Gamma correction block
* @DPU_DIM_LAYER Layer mixer supports dim layer
* @DPU_MIXER_MAX maximum value
*/
enum {
DPU_MIXER_LAYER = 0x1,
DPU_MIXER_SOURCESPLIT,
DPU_MIXER_GC,
DPU_DIM_LAYER,
DPU_MIXER_MAX
};
/**
* PINGPONG sub-blocks
* @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
* @DPU_PINGPONG_DITHER, Dither blocks
* @DPU_PINGPONG_MAX
*/
enum {
DPU_PINGPONG_TE = 0x1,
DPU_PINGPONG_TE2,
DPU_PINGPONG_SPLIT,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
DPU_PINGPONG_MAX
};
/**
* CTL sub-blocks
* @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
* @DPU_CTL_MAX
*/
enum {
DPU_CTL_SPLIT_DISPLAY = 0x1,
DPU_CTL_MAX
};
/**
* VBIF sub-blocks and features
* @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
* @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
* @DPU_VBIF_MAX maximum value
*/
enum {
DPU_VBIF_QOS_OTLIM = 0x1,
DPU_VBIF_QOS_REMAP,
DPU_VBIF_MAX
};
/**
* MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
* @name: string name for debug purposes
* @id: enum identifying this block
* @base: register base offset to mdss
* @len: length of hardware block
* @features bit mask identifying sub-blocks/features
*/
#define DPU_HW_BLK_INFO \
char name[DPU_HW_BLK_NAME_LEN]; \
u32 id; \
u32 base; \
u32 len; \
unsigned long features
/**
* MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
* @name: string name for debug purposes
* @id: enum identifying this sub-block
* @base: offset of this sub-block relative to the block
* offset
* @len register block length of this sub-block
*/
#define DPU_HW_SUBBLK_INFO \
char name[DPU_HW_BLK_NAME_LEN]; \
u32 id; \
u32 base; \
u32 len
/**
* struct dpu_src_blk: SSPP part of the source pipes
* @info: HW register and features supported by this sub-blk
*/
struct dpu_src_blk {
DPU_HW_SUBBLK_INFO;
};
/**
* struct dpu_scaler_blk: Scaler information
* @info: HW register and features supported by this sub-blk
* @version: qseed block revision
*/
struct dpu_scaler_blk {
DPU_HW_SUBBLK_INFO;
u32 version;
};
struct dpu_csc_blk {
DPU_HW_SUBBLK_INFO;
};
/**
* struct dpu_pp_blk : Pixel processing sub-blk information
* @info: HW register and features supported by this sub-blk
* @version: HW Algorithm version
*/
struct dpu_pp_blk {
DPU_HW_SUBBLK_INFO;
u32 version;
};
/**
* struct dpu_format_extended - define dpu specific pixel format+modifier
* @fourcc_format: Base FOURCC pixel format code
* @modifier: 64-bit drm format modifier, same modifier must be applied to all
* framebuffer planes
*/
struct dpu_format_extended {
uint32_t fourcc_format;
uint64_t modifier;
};
/**
* enum dpu_qos_lut_usage - define QoS LUT use cases
*/
enum dpu_qos_lut_usage {
DPU_QOS_LUT_USAGE_LINEAR,
DPU_QOS_LUT_USAGE_MACROTILE,
DPU_QOS_LUT_USAGE_NRT,
DPU_QOS_LUT_USAGE_MAX,
};
/**
* struct dpu_qos_lut_entry - define QoS LUT table entry
* @fl: fill level, or zero on last entry to indicate default lut
* @lut: lut to use while the fill level is less than or equal to @fl
*/
struct dpu_qos_lut_entry {
u32 fl;
u64 lut;
};
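/*
 * Example: the sdm845 linear table earlier in this patch starts with
 * {.fl = 4, .lut = 0x357}, i.e. LUT 0x357 applies while the fill level
 * is at most 4; the terminating {.fl = 0, ...} entry supplies the
 * default LUT for any larger fill level.
 */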
/**
* struct dpu_qos_lut_tbl - define QoS LUT table
* @nentry: number of entry in this table
* @entries: Pointer to table entries
*/
struct dpu_qos_lut_tbl {
u32 nentry;
struct dpu_qos_lut_entry *entries;
};
/**
* struct dpu_caps - define DPU capabilities
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
* supported z order
* @qseed_type qseed2 or qseed3 support.
* @smart_dma_rev Supported version of SmartDMA feature.
* @ubwc_version UBWC feature version (0x0 for not supported)
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
*/
struct dpu_caps {
u32 max_mixer_width;
u32 max_mixer_blendstages;
u32 qseed_type;
u32 smart_dma_rev;
u32 ubwc_version;
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
};
/**
* struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
* @maxlinewidth: max line width supported by this pipe
* @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
* @maxhdeciexp: max horizontal decimation supported by this pipe
* (max is 2^value)
* @maxvdeciexp: max vertical decimation supported by this pipe
* (max is 2^value)
*/
struct dpu_sspp_blks_common {
u32 maxlinewidth;
u32 pixel_ram_size;
u32 maxhdeciexp;
u32 maxvdeciexp;
};
/**
* struct dpu_sspp_sub_blks : SSPP sub-blocks
* @common: Pointer to common configurations shared by sub-blocks
* @creq_vblank: creq priority during vertical blanking
* @danger_vblank: danger priority during vertical blanking
* @maxdwnscale: max downscale ratio supported(without DECIMATION)
* @maxupscale: maxupscale ratio supported
* @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @src_blk:
* @scaler_blk:
* @csc_blk:
* @hsic:
* @memcolor:
* @pcc_blk:
* @igc_blk:
* @format_list: Pointer to list of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
*/
struct dpu_sspp_sub_blks {
const struct dpu_sspp_blks_common *common;
u32 creq_vblank;
u32 danger_vblank;
u32 maxdwnscale;
u32 maxupscale;
u32 smart_dma_priority;
u32 max_per_pipe_bw;
struct dpu_src_blk src_blk;
struct dpu_scaler_blk scaler_blk;
struct dpu_pp_blk csc_blk;
struct dpu_pp_blk hsic_blk;
struct dpu_pp_blk memcolor_blk;
struct dpu_pp_blk pcc_blk;
struct dpu_pp_blk igc_blk;
const struct dpu_format_extended *format_list;
const struct dpu_format_extended *virt_format_list;
};
/**
* struct dpu_lm_sub_blks: information of mixer block
* @maxwidth: Max pixel width supported by this mixer
* @maxblendstages: Max number of blend-stages supported
* @blendstage_base: Blend-stage register base offset
* @gc: gamma correction block
*/
struct dpu_lm_sub_blks {
u32 maxwidth;
u32 maxblendstages;
u32 blendstage_base[MAX_BLOCKS];
struct dpu_pp_blk gc;
};
struct dpu_pingpong_sub_blks {
struct dpu_pp_blk te;
struct dpu_pp_blk te2;
struct dpu_pp_blk dither;
};
/**
* dpu_clk_ctrl_type - Defines top level clock control signals
*/
enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_NONE,
DPU_CLK_CTRL_VIG0,
DPU_CLK_CTRL_VIG1,
DPU_CLK_CTRL_VIG2,
DPU_CLK_CTRL_VIG3,
DPU_CLK_CTRL_VIG4,
DPU_CLK_CTRL_RGB0,
DPU_CLK_CTRL_RGB1,
DPU_CLK_CTRL_RGB2,
DPU_CLK_CTRL_RGB3,
DPU_CLK_CTRL_DMA0,
DPU_CLK_CTRL_DMA1,
DPU_CLK_CTRL_CURSOR0,
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
DPU_CLK_CTRL_MAX,
};
/* struct dpu_clk_ctrl_reg : Clock control register
* @reg_off: register offset
* @bit_off: bit offset
*/
struct dpu_clk_ctrl_reg {
u32 reg_off;
u32 bit_off;
};
/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
* @id: index identifying this block
* @base: register base offset to mdss
* @features bit mask identifying sub-blocks/features
* @highest_bank_bit: UBWC parameter
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
* @has_dest_scaler: indicates support of destination scaler
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
DPU_HW_BLK_INFO;
u32 highest_bank_bit;
u32 ubwc_static;
u32 ubwc_swizzle;
bool has_dest_scaler;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
/* struct dpu_ctl_cfg : CTL instance info
* @id: index identifying this block
* @base: register base offset to mdss
* @features bit mask identifying sub-blocks/features
*/
struct dpu_ctl_cfg {
DPU_HW_BLK_INFO;
};
/**
* struct dpu_sspp_cfg - information of source pipes
* @id: index identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @sblk: SSPP sub-blocks information
* @xin_id: bus client identifier
* @clk_ctrl clock control identifier
* @type sspp type identifier
*/
struct dpu_sspp_cfg {
DPU_HW_BLK_INFO;
const struct dpu_sspp_sub_blks *sblk;
u32 xin_id;
enum dpu_clk_ctrl_type clk_ctrl;
u32 type;
};
/**
* struct dpu_lm_cfg - information of layer mixer blocks
* @id: index identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
* @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
u32 ds;
unsigned long lm_pair_mask;
};
/**
* struct dpu_ds_top_cfg - information of dest scaler top
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying features
* @version hw version of dest scaler
* @maxinputwidth maximum input line width
* @maxoutputwidth maximum output line width
* @maxupscale maximum upscale ratio
*/
struct dpu_ds_top_cfg {
DPU_HW_BLK_INFO;
u32 version;
u32 maxinputwidth;
u32 maxoutputwidth;
u32 maxupscale;
};
/**
* struct dpu_ds_cfg - information of dest scaler blocks
* @id enum identifying this block
* @base register offset wrt DS top offset
* @features bit mask identifying features
* @version hw version of the qseed block
* @top DS top information
*/
struct dpu_ds_cfg {
DPU_HW_BLK_INFO;
u32 version;
const struct dpu_ds_top_cfg *top;
};
/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @sblk sub-blocks information
*/
struct dpu_pingpong_cfg {
DPU_HW_BLK_INFO;
const struct dpu_pingpong_sub_blks *sblk;
};
/**
* struct dpu_cdm_cfg - information of chroma down blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @intf_connect Bitmask of INTF IDs this CDM can connect to
*/
struct dpu_cdm_cfg {
DPU_HW_BLK_INFO;
unsigned long intf_connect;
};
/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @type: Interface type(DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency, in lines, needed to prefetch
*/
struct dpu_intf_cfg {
DPU_HW_BLK_INFO;
u32 type; /* interface type */
u32 controller_id;
u32 prog_fetch_lines_worst_case;
};
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
* @ot_limit OT limit to use up to specified pixel per second
*/
struct dpu_vbif_dynamic_ot_cfg {
u64 pps;
u32 ot_limit;
};
/**
* struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
* @count length of cfg
* @cfg pointer to array of configuration settings with
* ascending requirements
*/
struct dpu_vbif_dynamic_ot_tbl {
u32 count;
struct dpu_vbif_dynamic_ot_cfg *cfg;
};
/**
* struct dpu_vbif_qos_tbl - QoS priority table
* @npriority_lvl number of priority levels
* @priority_lvl pointer to array of priority levels in ascending order
*/
struct dpu_vbif_qos_tbl {
u32 npriority_lvl;
u32 *priority_lvl;
};
/**
* struct dpu_vbif_cfg - information of VBIF blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @default_ot_rd_limit default OT read limit
* @default_ot_wr_limit default OT write limit
* @xin_halt_timeout maximum time (in usec) for xin to halt
* @dynamic_ot_rd_tbl dynamic OT read configuration table
* @dynamic_ot_wr_tbl dynamic OT write configuration table
* @qos_rt_tbl real-time QoS priority table
* @qos_nrt_tbl non-real-time QoS priority table
* @memtype_count number of defined memtypes
* @memtype array of xin memtype definitions
*/
struct dpu_vbif_cfg {
DPU_HW_BLK_INFO;
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 xin_halt_timeout;
struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
struct dpu_vbif_qos_tbl qos_rt_tbl;
struct dpu_vbif_qos_tbl qos_nrt_tbl;
u32 memtype_count;
u32 memtype[MAX_XIN_COUNT];
};
/**
* struct dpu_reg_dma_cfg - information of lut dma blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* @version version of lutdma hw block
* @trigger_sel_off offset to trigger select registers of lutdma
*/
struct dpu_reg_dma_cfg {
DPU_HW_BLK_INFO;
u32 version;
u32 trigger_sel_off;
};
/**
* Define CDP use cases
* @DPU_PERF_CDP_USAGE_RT: real-time use cases
* @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
*/
enum {
DPU_PERF_CDP_USAGE_RT,
DPU_PERF_CDP_USAGE_NRT,
DPU_PERF_CDP_USAGE_MAX
};
/**
* struct dpu_perf_cdp_cfg - define CDP use case configuration
* @rd_enable: true if read pipe CDP is enabled
* @wr_enable: true if write pipe CDP is enabled
*/
struct dpu_perf_cdp_cfg {
bool rd_enable;
bool wr_enable;
};
/**
* struct dpu_perf_cfg - performance control settings
* @max_bw_low low threshold of maximum bandwidth (kbps)
* @max_bw_high high threshold of maximum bandwidth (kbps)
* @min_core_ib minimum mnoc ib vote in kbps
* @min_llcc_ib minimum llcc ib vote in kbps
* @min_dram_ib minimum dram ib vote in kbps
* @core_ib_ff core instantaneous bandwidth fudge factor
* @core_clk_ff core clock fudge factor
* @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
* @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
* @undersized_prefill_lines undersized prefill in lines
* @xtra_prefill_lines extra prefill latency in lines
* @dest_scale_prefill_lines destination scaler latency in lines
* @macrotile_prefill_lines macrotile latency in lines
* @yuv_nv12_prefill_lines yuv_nv12 latency in lines
* @linear_prefill_lines linear latency in lines
* @downscaling_prefill_lines downscaling latency in lines
* @amortizable_threshold minimum y position for traffic shaping prefill
* @min_prefill_lines minimum pipeline latency in lines
* @safe_lut_tbl: LUT tables for safe signals
* @danger_lut_tbl: LUT tables for danger signals
* @qos_lut_tbl: LUT tables for QoS signals
* @cdp_cfg cdp use case configurations
*/
struct dpu_perf_cfg {
u32 max_bw_low;
u32 max_bw_high;
u32 min_core_ib;
u32 min_llcc_ib;
u32 min_dram_ib;
const char *core_ib_ff;
const char *core_clk_ff;
const char *comp_ratio_rt;
const char *comp_ratio_nrt;
u32 undersized_prefill_lines;
u32 xtra_prefill_lines;
u32 dest_scale_prefill_lines;
u32 macrotile_prefill_lines;
u32 yuv_nv12_prefill_lines;
u32 linear_prefill_lines;
u32 downscaling_prefill_lines;
u32 amortizable_threshold;
u32 min_prefill_lines;
u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
};
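/*
* For illustration only (the values and separator below are assumptions,
* not taken from any real target), a catalog entry could encode the
* documented <fourcc>/<ven>/<mod>/<comp ratio> tuples as:
*
*	.comp_ratio_rt = "NV12/1/1/1.23",
*	.comp_ratio_nrt = "NV12/1/1/1.25",
*/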
/**
* struct dpu_mdss_cfg - information of MDSS HW
* This is the main catalog data structure representing
* this HW version. Contains number of instances,
* register offsets, and capabilities of all the MDSS HW sub-blocks.
*
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
*/
struct dpu_mdss_cfg {
u32 hwversion;
const struct dpu_caps *caps;
u32 mdp_count;
struct dpu_mdp_cfg *mdp;
u32 ctl_count;
struct dpu_ctl_cfg *ctl;
u32 sspp_count;
struct dpu_sspp_cfg *sspp;
u32 mixer_count;
struct dpu_lm_cfg *mixer;
u32 ds_count;
struct dpu_ds_cfg *ds;
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
u32 cdm_count;
struct dpu_cdm_cfg *cdm;
u32 intf_count;
struct dpu_intf_cfg *intf;
u32 vbif_count;
struct dpu_vbif_cfg *vbif;
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
u32 ad_count;
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
struct dpu_format_extended *dma_formats;
struct dpu_format_extended *cursor_formats;
struct dpu_format_extended *vig_formats;
};
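/**
* struct dpu_mdss_hw_cfg_handler - per-revision catalog setup handler
* @hw_rev: hardware revision handled by this entry
* @cfg_init: callback that fills in the catalog for this revision
*/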
struct dpu_mdss_hw_cfg_handler {
u32 hw_rev;
void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
};
/*
* Access Macros
*/
#define BLK_MDP(s) ((s)->mdp)
#define BLK_CTL(s) ((s)->ctl)
#define BLK_VIG(s) ((s)->vig)
#define BLK_RGB(s) ((s)->rgb)
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
#define BLK_DS(s) ((s)->ds)
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
/**
* dpu_hw_catalog_init - dpu hardware catalog init API; retrieves
* the hardcoded target specific catalog information in a config structure
* @hw_rev: caller needs to provide the hardware revision.
*
* Return: dpu config structure
*/
struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
/**
* dpu_hw_catalog_deinit - dpu hardware catalog cleanup
* @dpu_cfg: pointer returned from init function
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
/**
* dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
* @cfg: pointer to sspp cfg
*/
static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
{
return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
}
#endif /* _DPU_HW_CATALOG_H */
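
A minimal usage sketch of this catalog API (error handling trimmed; hw_rev,
mmio and CTL_0 are illustrative placeholders, not values mandated by this
patch): the catalog is fetched once per device and then handed to every hw
block constructor.

struct dpu_mdss_cfg *cat;
struct dpu_hw_ctl *ctl;

cat = dpu_hw_catalog_init(hw_rev);	/* hw_rev read back from the MDP */
if (IS_ERR(cat))
	return PTR_ERR(cat);

ctl = dpu_hw_ctl_init(CTL_0, mmio, cat);	/* catalog drives each block */
/* ... init remaining blocks, run ... */
dpu_hw_catalog_deinit(cat);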

@@ -0,0 +1,168 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hw_mdss.h"
static const struct dpu_format_extended plane_formats[] = {
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_RGBX8888, 0},
{DRM_FORMAT_BGRX8888, 0},
{DRM_FORMAT_XBGR8888, 0},
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_RGB565, 0},
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_XRGB1555, 0},
{DRM_FORMAT_XBGR1555, 0},
{DRM_FORMAT_RGBX5551, 0},
{DRM_FORMAT_BGRX5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{DRM_FORMAT_XRGB4444, 0},
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_RGBX4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{0, 0},
};
static const struct dpu_format_extended plane_formats_yuv[] = {
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_BGRX8888, 0},
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_XBGR8888, 0},
{DRM_FORMAT_RGBX8888, 0},
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_RGB565, 0},
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_XRGB1555, 0},
{DRM_FORMAT_XBGR1555, 0},
{DRM_FORMAT_RGBX5551, 0},
{DRM_FORMAT_BGRX5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{DRM_FORMAT_XRGB4444, 0},
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_RGBX4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{DRM_FORMAT_NV12, 0},
{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_NV21, 0},
{DRM_FORMAT_NV16, 0},
{DRM_FORMAT_NV61, 0},
{DRM_FORMAT_VYUY, 0},
{DRM_FORMAT_UYVY, 0},
{DRM_FORMAT_YUYV, 0},
{DRM_FORMAT_YVYU, 0},
{DRM_FORMAT_YUV420, 0},
{DRM_FORMAT_YVU420, 0},
{0, 0},
};
static const struct dpu_format_extended cursor_formats[] = {
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{0, 0},
};
static const struct dpu_format_extended wb2_formats[] = {
{DRM_FORMAT_RGB565, 0},
{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_RGB888, 0},
{DRM_FORMAT_ARGB8888, 0},
{DRM_FORMAT_RGBA8888, 0},
{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_XRGB8888, 0},
{DRM_FORMAT_RGBX8888, 0},
{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_ARGB1555, 0},
{DRM_FORMAT_RGBA5551, 0},
{DRM_FORMAT_XRGB1555, 0},
{DRM_FORMAT_RGBX5551, 0},
{DRM_FORMAT_ARGB4444, 0},
{DRM_FORMAT_RGBA4444, 0},
{DRM_FORMAT_RGBX4444, 0},
{DRM_FORMAT_XRGB4444, 0},
{DRM_FORMAT_BGR565, 0},
{DRM_FORMAT_BGR888, 0},
{DRM_FORMAT_ABGR8888, 0},
{DRM_FORMAT_BGRA8888, 0},
{DRM_FORMAT_BGRX8888, 0},
{DRM_FORMAT_XBGR8888, 0},
{DRM_FORMAT_ABGR1555, 0},
{DRM_FORMAT_BGRA5551, 0},
{DRM_FORMAT_XBGR1555, 0},
{DRM_FORMAT_BGRX5551, 0},
{DRM_FORMAT_ABGR4444, 0},
{DRM_FORMAT_BGRA4444, 0},
{DRM_FORMAT_BGRX4444, 0},
{DRM_FORMAT_XBGR4444, 0},
{DRM_FORMAT_YUV420, 0},
{DRM_FORMAT_NV12, 0},
{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_NV16, 0},
{DRM_FORMAT_YUYV, 0},
{0, 0},
};
static const struct dpu_format_extended rgb_10bit_formats[] = {
{DRM_FORMAT_BGRA1010102, 0},
{DRM_FORMAT_BGRX1010102, 0},
{DRM_FORMAT_RGBA1010102, 0},
{DRM_FORMAT_RGBX1010102, 0},
{DRM_FORMAT_ABGR2101010, 0},
{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_XBGR2101010, 0},
{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
{DRM_FORMAT_ARGB2101010, 0},
{DRM_FORMAT_XRGB2101010, 0},
};
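
The {0, 0} entry terminates a list, letting consumers walk it without a
separate length (note rgb_10bit_formats above carries no terminator and so
cannot be walked this way). A minimal sketch, assuming the entry members
are named fourcc_format and modifier:

/* count the entries of a sentinel-terminated format list */
static int dpu_format_list_len(const struct dpu_format_extended *list)
{
	int len = 0;

	while (list && list[len].fourcc_format)
		len++;

	return len;
}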

@@ -0,0 +1,323 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_cdm.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define CDM_CSC_10_OPMODE 0x000
#define CDM_CSC_10_BASE 0x004
#define CDM_CDWN2_OP_MODE 0x100
#define CDM_CDWN2_CLAMP_OUT 0x104
#define CDM_CDWN2_PARAMS_3D_0 0x108
#define CDM_CDWN2_PARAMS_3D_1 0x10C
#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
#define CDM_CDWN2_COEFF_COSITE_V 0x128
#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
#define CDM_CDWN2_OUT_SIZE 0x130
#define CDM_HDMI_PACK_OP_MODE 0x200
#define CDM_CSC_10_MATRIX_COEFF_0 0x004
/**
* Horizontal coefficients for cosite chroma downscale
* s13 representation of coefficients
*/
static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
/**
* Horizontal coefficients for offsite chroma downscale
*/
static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
/**
* Vertical coefficients for cosite chroma downscale
*/
static u32 cosite_v_coeff[] = {0x00080004};
/**
* Vertical coefficients for offsite chroma downscale
*/
static u32 offsite_v_coeff[] = {0x00060002};
/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
static struct dpu_csc_cfg rgb2yuv_cfg = {
{
0x0083, 0x0102, 0x0032,
0x1fb5, 0x1f6c, 0x00e1,
0x00e1, 0x1f45, 0x1fdc
},
{ 0x00, 0x00, 0x00 },
{ 0x0040, 0x0200, 0x0200 },
{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
};
static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->cdm_count; i++) {
if (cdm == m->cdm[i].id) {
b->base_off = addr;
b->blk_off = m->cdm[i].base;
b->length = m->cdm[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_CDM;
return &m->cdm[i];
}
}
return ERR_PTR(-EINVAL);
}
static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
struct dpu_csc_cfg *data)
{
dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
return 0;
}
static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
struct dpu_hw_cdm_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 opmode = 0;
u32 out_size = 0;
if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
opmode &= ~BIT(7);
else
opmode |= BIT(7);
/* ENABLE DWNS_H bit */
opmode |= BIT(1);
switch (cfg->h_cdwn_type) {
case CDM_CDWN_DISABLE:
/* CLEAR METHOD_H field */
opmode &= ~(0x18);
/* CLEAR DWNS_H bit */
opmode &= ~BIT(1);
break;
case CDM_CDWN_PIXEL_DROP:
/* Clear METHOD_H field (pixel drop is 0) */
opmode &= ~(0x18);
break;
case CDM_CDWN_AVG:
/* Clear METHOD_H field (Average is 0x1) */
opmode &= ~(0x18);
opmode |= (0x1 << 0x3);
break;
case CDM_CDWN_COSITE:
/* Clear METHOD_H field (Average is 0x2) */
opmode &= ~(0x18);
opmode |= (0x2 << 0x3);
/* Co-site horizontal coefficients */
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
cosite_h_coeff[0]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
cosite_h_coeff[1]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
cosite_h_coeff[2]);
break;
case CDM_CDWN_OFFSITE:
/* Clear METHOD_H field (Average is 0x3) */
opmode &= ~(0x18);
opmode |= (0x3 << 0x3);
/* Off-site horizontal coefficients */
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
offsite_h_coeff[0]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
offsite_h_coeff[1]);
DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
offsite_h_coeff[2]);
break;
default:
pr_err("%s invalid horz down sampling type\n", __func__);
return -EINVAL;
}
/* ENABLE DWNS_V bit */
opmode |= BIT(2);
switch (cfg->v_cdwn_type) {
case CDM_CDWN_DISABLE:
/* CLEAR METHOD_V field */
opmode &= ~(0x60);
/* CLEAR DWNS_V bit */
opmode &= ~BIT(2);
break;
case CDM_CDWN_PIXEL_DROP:
/* Clear METHOD_V field (pixel drop is 0) */
opmode &= ~(0x60);
break;
case CDM_CDWN_AVG:
/* Clear METHOD_V field (Average is 0x1) */
opmode &= ~(0x60);
opmode |= (0x1 << 0x5);
break;
case CDM_CDWN_COSITE:
/* Clear METHOD_V field (Average is 0x2) */
opmode &= ~(0x60);
opmode |= (0x2 << 0x5);
/* Co-site vertical coefficients */
DPU_REG_WRITE(c,
CDM_CDWN2_COEFF_COSITE_V,
cosite_v_coeff[0]);
break;
case CDM_CDWN_OFFSITE:
/* Clear METHOD_V field (Average is 0x3) */
opmode &= ~(0x60);
opmode |= (0x3 << 0x5);
/* Off-site vertical coefficients */
DPU_REG_WRITE(c,
CDM_CDWN2_COEFF_OFFSITE_V,
offsite_v_coeff[0]);
break;
default:
return -EINVAL;
}
if (cfg->v_cdwn_type || cfg->h_cdwn_type)
opmode |= BIT(0); /* EN CDWN module */
else
opmode &= ~BIT(0);
out_size = (cfg->output_width & 0xFFFF) |
((cfg->output_height & 0xFFFF) << 16);
DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
((0x3FF << 16) | 0x0));
return 0;
}
int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
struct dpu_hw_cdm_cfg *cdm)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
const struct dpu_format *fmt = cdm->output_fmt;
struct cdm_output_cfg cdm_cfg = { 0 };
u32 opmode = 0;
u32 csc = 0;
if (!DPU_FORMAT_IS_YUV(fmt))
return -EINVAL;
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
if (fmt->chroma_sample != DPU_CHROMA_H1V2)
return -EINVAL; /* unsupported format */
opmode = BIT(0);
opmode |= (fmt->chroma_sample << 1);
cdm_cfg.intf_en = true;
}
csc |= BIT(2);
csc &= ~BIT(1);
csc |= BIT(0);
if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
return 0;
}
void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
{
struct cdm_output_cfg cdm_cfg = { 0 };
if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
}
static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
unsigned long features)
{
ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
ops->enable = dpu_hw_cdm_enable;
ops->disable = dpu_hw_cdm_disable;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m,
struct dpu_hw_mdp *hw_mdp)
{
struct dpu_hw_cdm *c;
struct dpu_cdm_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _cdm_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
c->idx = idx;
c->caps = cfg;
_setup_cdm_ops(&c->ops, c->caps->features);
c->hw_mdp = hw_mdp;
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
/*
* Perform any default initialization for the chroma down module:
* set up the default csc coefficients
*/
dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
{
if (cdm)
dpu_hw_blk_destroy(&cdm->base);
kfree(cdm);
}
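
A hedged usage sketch tying the CDM pieces together (CDM_0, mmio, cat,
hw_mdp and fmt are illustrative placeholders; error handling trimmed).
Note that dpu_hw_cdm_enable() insists on a YUV output format, with H1V2
chroma sampling on the HDMI path:

struct dpu_hw_cdm *cdm;
struct dpu_hw_cdm_cfg cdm_cfg = {
	.output_width	  = 1920,
	.output_height	  = 1080,
	.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
	.h_cdwn_type	  = CDM_CDWN_COSITE,
	.v_cdwn_type	  = CDM_CDWN_DISABLE,
	.output_fmt	  = fmt,		/* a YUV dpu_format */
	.output_type	  = CDM_CDWN_OUTPUT_HDMI,
};

cdm = dpu_hw_cdm_init(CDM_0, mmio, cat, hw_mdp);
if (!IS_ERR(cdm)) {
	cdm->ops.setup_cdwn(cdm, &cdm_cfg);
	cdm->ops.enable(cdm, &cdm_cfg);
}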

@@ -0,0 +1,139 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_CDM_H
#define _DPU_HW_CDM_H
#include "dpu_hw_mdss.h"
#include "dpu_hw_top.h"
#include "dpu_hw_blk.h"
struct dpu_hw_cdm;
struct dpu_hw_cdm_cfg {
u32 output_width;
u32 output_height;
u32 output_bit_depth;
u32 h_cdwn_type;
u32 v_cdwn_type;
const struct dpu_format *output_fmt;
u32 output_type;
int flags;
};
enum dpu_hw_cdwn_type {
CDM_CDWN_DISABLE,
CDM_CDWN_PIXEL_DROP,
CDM_CDWN_AVG,
CDM_CDWN_COSITE,
CDM_CDWN_OFFSITE,
};
enum dpu_hw_cdwn_output_type {
CDM_CDWN_OUTPUT_HDMI,
CDM_CDWN_OUTPUT_WB,
};
enum dpu_hw_cdwn_output_bit_depth {
CDM_CDWN_OUTPUT_8BIT,
CDM_CDWN_OUTPUT_10BIT,
};
/**
* struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
* Assumption is these functions will be called after
* clocks are enabled
* @setup_csc_data: Programs the csc matrix
* @setup_cdwn: Sets up the chroma down sub module
* @enable: Enables the output to interface and programs the
* output packer
* @disable: Puts the cdm in bypass mode
*/
struct dpu_hw_cdm_ops {
/**
* Programs the CSC matrix for conversion from RGB space to YUV space.
* Calling this function is optional, as a default matrix is programmed
* automatically during initialization; call it to program a matrix
* other than the default.
* @cdm: Pointer to the chroma down context structure
* @data Pointer to CSC configuration data
* return: 0 if success; error code otherwise
*/
int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
struct dpu_csc_cfg *data);
/**
* Programs the Chroma downsample part.
* @cdm Pointer to chroma down context
*/
int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
struct dpu_hw_cdm_cfg *cfg);
/**
* Enable the CDM module
* @cdm Pointer to chroma down context
*/
int (*enable)(struct dpu_hw_cdm *cdm,
struct dpu_hw_cdm_cfg *cfg);
/**
* Disable the CDM module
* @cdm Pointer to chroma down context
*/
void (*disable)(struct dpu_hw_cdm *cdm);
};
struct dpu_hw_cdm {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* chroma down */
const struct dpu_cdm_cfg *caps;
enum dpu_cdm idx;
/* mdp top hw driver */
struct dpu_hw_mdp *hw_mdp;
/* ops */
struct dpu_hw_cdm_ops ops;
};
/**
* to_dpu_hw_cdm - convert base hw block object to dpu_hw_cdm container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_cdm, base);
}
/**
* dpu_hw_cdm_init - initializes the cdm hw driver object.
* Should be called once before accessing any cdm registers.
* @idx: cdm index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
* @hw_mdp: pointer to mdp top hw driver object
*/
struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m,
struct dpu_hw_mdp *hw_mdp);
/**
* dpu_hw_cdm_destroy - destroys CDM driver context
* @cdm: pointer to CDM driver context
*/
void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
#endif /*_DPU_HW_CDM_H */

@@ -0,0 +1,540 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->ctl_count; i++) {
if (ctl == m->ctl[i].id) {
b->base_off = addr;
b->blk_off = m->ctl[i].base;
b->length = m->ctl[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_CTL;
return &m->ctl[i];
}
}
return ERR_PTR(-EINVAL);
}
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
int i;
int stages = -EINVAL;
for (i = 0; i < count; i++) {
if (lm == mixer[i].id) {
stages = mixer[i].sblk->maxblendstages;
break;
}
}
return stages;
}
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
ctx->pending_flush_mask |= flushbits;
}
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
if (!ctx)
return 0x0;
return ctx->pending_flush_mask;
}
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, CTL_FLUSH);
}
static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
switch (sspp) {
case SSPP_VIG0:
flushbits = BIT(0);
break;
case SSPP_VIG1:
flushbits = BIT(1);
break;
case SSPP_VIG2:
flushbits = BIT(2);
break;
case SSPP_VIG3:
flushbits = BIT(18);
break;
case SSPP_RGB0:
flushbits = BIT(3);
break;
case SSPP_RGB1:
flushbits = BIT(4);
break;
case SSPP_RGB2:
flushbits = BIT(5);
break;
case SSPP_RGB3:
flushbits = BIT(19);
break;
case SSPP_DMA0:
flushbits = BIT(11);
break;
case SSPP_DMA1:
flushbits = BIT(12);
break;
case SSPP_DMA2:
flushbits = BIT(24);
break;
case SSPP_DMA3:
flushbits = BIT(25);
break;
case SSPP_CURSOR0:
flushbits = BIT(22);
break;
case SSPP_CURSOR1:
flushbits = BIT(23);
break;
default:
break;
}
return flushbits;
}
static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
switch (lm) {
case LM_0:
flushbits = BIT(6);
break;
case LM_1:
flushbits = BIT(7);
break;
case LM_2:
flushbits = BIT(8);
break;
case LM_3:
flushbits = BIT(9);
break;
case LM_4:
flushbits = BIT(10);
break;
case LM_5:
flushbits = BIT(20);
break;
default:
return -EINVAL;
}
flushbits |= CTL_FLUSH_MASK_CTL;
return flushbits;
}
static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
case INTF_0:
*flushbits |= BIT(31);
break;
case INTF_1:
*flushbits |= BIT(30);
break;
case INTF_2:
*flushbits |= BIT(29);
break;
case INTF_3:
*flushbits |= BIT(28);
break;
default:
return -EINVAL;
}
return 0;
}
static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_cdm cdm)
{
switch (cdm) {
case CDM_0:
*flushbits |= BIT(26);
break;
default:
return -EINVAL;
}
return 0;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
ktime_t timeout;
u32 status;
timeout = ktime_add_us(ktime_get(), timeout_us);
/*
* it takes around 30us for mdp to finish resetting its ctl path;
* poll every 20-50us so that the reset should be complete by the
* first poll
*/
do {
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x1;
if (status)
usleep_range(20, 50);
} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
return status;
}
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
return -EINVAL;
return 0;
}
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 status;
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x01;
if (!status)
return 0;
pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
return -EINVAL;
}
return 0;
}
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int i;
for (i = 0; i < ctx->mixer_count; i++) {
DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
}
}
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
if (stages < 0)
return;
if (test_bit(DPU_MIXER_SOURCESPLIT,
&ctx->mixer_hw_caps->features))
pipes_per_stage = PIPES_PER_STAGE;
else
pipes_per_stage = 1;
mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
if (!stage_cfg)
goto exit;
for (i = 0; i <= stages; i++) {
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
ext = i >= 7;
for (j = 0 ; j < pipes_per_stage; j++) {
enum dpu_sspp_multirect_index rect_index =
stage_cfg->multirect_index[i][j];
switch (stage_cfg->stage[i][j]) {
case SSPP_VIG0:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
} else {
mixercfg |= mix << 0;
mixercfg_ext |= ext << 0;
}
break;
case SSPP_VIG1:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
} else {
mixercfg |= mix << 3;
mixercfg_ext |= ext << 2;
}
break;
case SSPP_VIG2:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
} else {
mixercfg |= mix << 6;
mixercfg_ext |= ext << 4;
}
break;
case SSPP_VIG3:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
} else {
mixercfg |= mix << 26;
mixercfg_ext |= ext << 6;
}
break;
case SSPP_RGB0:
mixercfg |= mix << 9;
mixercfg_ext |= ext << 8;
break;
case SSPP_RGB1:
mixercfg |= mix << 12;
mixercfg_ext |= ext << 10;
break;
case SSPP_RGB2:
mixercfg |= mix << 15;
mixercfg_ext |= ext << 12;
break;
case SSPP_RGB3:
mixercfg |= mix << 29;
mixercfg_ext |= ext << 14;
break;
case SSPP_DMA0:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
} else {
mixercfg |= mix << 18;
mixercfg_ext |= ext << 16;
}
break;
case SSPP_DMA1:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
} else {
mixercfg |= mix << 21;
mixercfg_ext |= ext << 18;
}
break;
case SSPP_DMA2:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
} else {
mix |= (i + 1) & 0xF;
mixercfg_ext2 |= mix << 0;
}
break;
case SSPP_DMA3:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
} else {
mix |= (i + 1) & 0xF;
mixercfg_ext2 |= mix << 4;
}
break;
case SSPP_CURSOR0:
mixercfg_ext |= ((i + 1) & 0xF) << 20;
break;
case SSPP_CURSOR1:
mixercfg_ext |= ((i + 1) & 0xF) << 26;
break;
default:
break;
}
}
}
exit:
DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_cfg = 0;
intf_cfg |= (cfg->intf & 0xF) << 4;
if (cfg->mode_3d) {
intf_cfg |= BIT(19);
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
intf_cfg &= ~(0x3 << 15);
break;
case DPU_CTL_MODE_SEL_CMD:
intf_cfg |= BIT(17);
intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
break;
default:
pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
return;
}
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _ctl_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
pr_err("failed to create dpu_hw_ctl %d\n", idx);
return ERR_PTR(-EINVAL);
}
c->caps = cfg;
_setup_ctl_ops(&c->ops, c->caps->features);
c->idx = idx;
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
if (ctx)
dpu_hw_blk_destroy(&ctx->base);
kfree(ctx);
}
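
The flush machinery above is a cache-then-trigger sequence. A minimal
sketch of one frame kickoff (ctl comes from dpu_hw_ctl_init(); the
pipe/mixer/interface ids are illustrative):

u32 flush = 0;

flush |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
flush |= ctl->ops.get_bitmask_mixer(ctl, LM_0);
ctl->ops.get_bitmask_intf(ctl, &flush, INTF_1);

ctl->ops.update_pending_flush(ctl, flush);	/* cached, no hw effect */
ctl->ops.trigger_flush(ctl);			/* writes CTL_FLUSH */
ctl->ops.trigger_start(ctl);			/* writes CTL_START */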

@@ -0,0 +1,218 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_CTL_H
#define _DPU_HW_CTL_H
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_blk.h"
/**
* dpu_ctl_mode_sel: Interface mode selection
* DPU_CTL_MODE_SEL_VID: Video mode interface
* DPU_CTL_MODE_SEL_CMD: Command mode interface
*/
enum dpu_ctl_mode_sel {
DPU_CTL_MODE_SEL_VID = 0,
DPU_CTL_MODE_SEL_CMD
};
struct dpu_hw_ctl;
/**
* struct dpu_hw_stage_cfg - blending stage cfg
* @stage : SSPP_ID at each stage
* @multirect_index: index of the rectangle of SSPP.
*/
struct dpu_hw_stage_cfg {
enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
enum dpu_sspp_multirect_index multirect_index
[DPU_STAGE_MAX][PIPES_PER_STAGE];
};
/**
* struct dpu_hw_intf_cfg : Describes how the DPU writes data to the output interface
* @intf : Interface id
* @mode_3d: 3d mux configuration
* @intf_mode_sel: Interface mode, cmd / vid
* @stream_sel: Stream selection for multi-stream interfaces
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
enum dpu_3d_blend_mode mode_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
int stream_sel;
};
/**
* struct dpu_hw_ctl_ops - Interface to the ctl hw driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_ctl_ops {
/**
* Kick off the hw operation for sw controlled interfaces
* (DSI cmd mode and the WB interface are sw controlled)
* @ctx : ctl path ctx pointer
*/
void (*trigger_start)(struct dpu_hw_ctl *ctx);
/**
* Indicate that a kickoff prepare is in progress, for sw
* controlled interfaces (DSI cmd mode and the WB interface
* are sw controlled)
* @ctx : ctl path ctx pointer
*/
void (*trigger_pending)(struct dpu_hw_ctl *ctx);
/**
* Clear the value of the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
/**
* Query the value of the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
/**
* OR in the given flushbits to the cached pending_flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
*/
void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
u32 flushbits);
/**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
void (*trigger_flush)(struct dpu_hw_ctl *ctx);
/**
* Read the value of the flush register
* @ctx : ctl path ctx pointer
* @Return: value of the ctl flush register.
*/
u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
/**
* Setup ctl_path interface config
* @ctx
* @cfg : interface config structure pointer
*/
void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
int (*reset)(struct dpu_hw_ctl *c);
/*
* wait_reset_status - checks ctl reset status
* @ctx : ctl path ctx pointer
*
* This function checks the ctl reset status bit.
* If the reset bit is set, it keeps polling the status till the hw
* reset is complete.
* Returns: 0 on success or -error if reset incomplete within interval
*/
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
enum dpu_sspp blk);
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);
int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_cdm blk);
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
*/
void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
/**
* Configure layer mixer to pipe configuration
* @ctx : ctl path ctx pointer
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
*/
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
};
/**
* struct dpu_hw_ctl : CTL PATH driver object
* @base: hardware block base structure
* @hw: block register map object
* @idx: control path index
* @caps: control path capabilities
* @mixer_count: number of mixers
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @ops: operation list
*/
struct dpu_hw_ctl {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* ctl path */
int idx;
const struct dpu_ctl_cfg *caps;
int mixer_count;
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
};
/**
* to_dpu_hw_ctl - convert base hw block object to dpu_hw_ctl container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_ctl, base);
}
/**
* dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
* Should be called before accessing any ctl path registers.
* @idx: ctl_path index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
*/
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_ctl_destroy(): Destroys ctl driver context
* should be called to free the context
*/
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
#endif /*_DPU_HW_CTL_H */
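
To illustrate struct dpu_hw_stage_cfg, a hedged sketch staging two pipes
on one mixer (the ids are illustrative; border color output is always
enabled by setup_blendstage() underneath stage 0):

struct dpu_hw_stage_cfg stage = { 0 };

stage.stage[0][0] = SSPP_VIG0;	/* blend stage 0 */
stage.stage[1][0] = SSPP_DMA0;	/* blend stage 1, on top */

ctl->ops.setup_blendstage(ctl, LM_0, &stage);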

[File diff suppressed because it is too large]

@@ -0,0 +1,257 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_INTERRUPTS_H
#define _DPU_HW_INTERRUPTS_H
#include <linux/types.h>
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#define IRQ_SOURCE_MDP BIT(0)
#define IRQ_SOURCE_DSI0 BIT(4)
#define IRQ_SOURCE_DSI1 BIT(5)
#define IRQ_SOURCE_HDMI BIT(8)
#define IRQ_SOURCE_EDP BIT(12)
#define IRQ_SOURCE_MHL BIT(16)
/**
* dpu_intr_type - HW Interrupt Type
* @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
* @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
* @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
* @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
* @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
* @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
* @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
* @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
* @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
* @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
* @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
* @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
* @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
* @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
* @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
* @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
* @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
* @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
* @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
* @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
* @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
* @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
* @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
* @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
* @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
* @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
* @DPU_IRQ_TYPE_CTL_START: Control start
* @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
*/
enum dpu_intr_type {
DPU_IRQ_TYPE_WB_ROT_COMP,
DPU_IRQ_TYPE_WB_WFD_COMP,
DPU_IRQ_TYPE_PING_PONG_COMP,
DPU_IRQ_TYPE_PING_PONG_RD_PTR,
DPU_IRQ_TYPE_PING_PONG_WR_PTR,
DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
DPU_IRQ_TYPE_INTF_UNDER_RUN,
DPU_IRQ_TYPE_INTF_VSYNC,
DPU_IRQ_TYPE_CWB_OVERFLOW,
DPU_IRQ_TYPE_HIST_VIG_DONE,
DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
DPU_IRQ_TYPE_HIST_DSPP_DONE,
DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
DPU_IRQ_TYPE_WD_TIMER,
DPU_IRQ_TYPE_SFI_VIDEO_IN,
DPU_IRQ_TYPE_SFI_VIDEO_OUT,
DPU_IRQ_TYPE_SFI_CMD_0_IN,
DPU_IRQ_TYPE_SFI_CMD_0_OUT,
DPU_IRQ_TYPE_SFI_CMD_1_IN,
DPU_IRQ_TYPE_SFI_CMD_1_OUT,
DPU_IRQ_TYPE_SFI_CMD_2_IN,
DPU_IRQ_TYPE_SFI_CMD_2_OUT,
DPU_IRQ_TYPE_PROG_LINE,
DPU_IRQ_TYPE_AD4_BL_DONE,
DPU_IRQ_TYPE_CTL_START,
DPU_IRQ_TYPE_RESERVED,
};
struct dpu_hw_intr;
/**
* Interrupt operations.
*/
struct dpu_hw_intr_ops {
/**
* set_mask - Programs the given interrupt register with the
* given interrupt mask. Register value will get overwritten.
* @intr: HW interrupt handle
* @reg: MDSS HW register offset
* @irqmask: IRQ mask value
*/
void (*set_mask)(
struct dpu_hw_intr *intr,
uint32_t reg,
uint32_t irqmask);
/**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops
* @intr_type: Interrupt type defined in dpu_intr_type
* @instance_idx: HW interrupt block instance
* @return: irq_idx or -EINVAL for lookup fail
*/
int (*irq_idx_lookup)(
enum dpu_intr_type intr_type,
u32 instance_idx);
/**
* enable_irq - Enable IRQ based on lookup IRQ index
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
* @return: 0 for success, otherwise failure
*/
int (*enable_irq)(
struct dpu_hw_intr *intr,
int irq_idx);
/**
* disable_irq - Disable IRQ based on lookup IRQ index
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
* @return: 0 for success, otherwise failure
*/
int (*disable_irq)(
struct dpu_hw_intr *intr,
int irq_idx);
/**
* clear_all_irqs - Clears all the interrupts (i.e. acknowledges
* any asserted IRQs). Useful during reset.
* @intr: HW interrupt handle
* @return: 0 for success, otherwise failure
*/
int (*clear_all_irqs)(
struct dpu_hw_intr *intr);
/**
* disable_all_irqs - Disables all the interrupts. Useful during reset.
* @intr: HW interrupt handle
* @return: 0 for success, otherwise failure
*/
int (*disable_all_irqs)(
struct dpu_hw_intr *intr);
/**
* dispatch_irqs - IRQ dispatcher will call the given callback
* function when a matching interrupt status bit is
* found in the irq mapping table.
* @intr: HW interrupt handle
* @cbfunc: Callback function pointer
* @arg: Argument to pass back during callback
*/
void (*dispatch_irqs)(
struct dpu_hw_intr *intr,
void (*cbfunc)(void *arg, int irq_idx),
void *arg);
/**
* get_interrupt_statuses - Gets and stores the value of all interrupt
* status registers that are currently fired.
* @intr: HW interrupt handle
*/
void (*get_interrupt_statuses)(
struct dpu_hw_intr *intr);
/**
* clear_interrupt_status - Clears HW interrupt status based on given
* lookup IRQ index.
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
*/
void (*clear_interrupt_status)(
struct dpu_hw_intr *intr,
int irq_idx);
/**
* clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
*/
void (*clear_intr_status_nolock)(
struct dpu_hw_intr *intr,
int irq_idx);
/**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
* @clear: True to clear irq after read
*/
u32 (*get_interrupt_status)(
struct dpu_hw_intr *intr,
int irq_idx,
bool clear);
/**
* get_valid_interrupts - Gets a mask of all valid interrupt sources
* within DPU. These are actually status bits
* within interrupt registers that specify the
* source of the interrupt in IRQs. For example,
* valid interrupt sources can be MDP, DSI,
* HDMI etc.
* @intr: HW interrupt handle
* @mask: Returning the interrupt source MASK
* @return: 0 for success, otherwise failure
*/
int (*get_valid_interrupts)(
struct dpu_hw_intr *intr,
uint32_t *mask);
};
/**
* struct dpu_hw_intr: hw interrupts handling data structure
* @hw: virtual address mapping
* @ops: function pointer mapping for IRQ handling
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
* @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
struct dpu_hw_intr_ops ops;
u32 *cache_irq_mask;
u32 *save_irq_status;
u32 irq_idx_tbl_size;
spinlock_t irq_lock;
};
/**
* dpu_hw_intr_init(): Initializes the interrupts hw object
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
*/
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_intr_destroy(): Cleanup interrupts hw object
* @intr: pointer to interrupts hw object
*/
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
#endif
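
As a usage sketch of the lookup/enable/dispatch flow (mmio, cat, vsync_cb
and priv are caller-supplied placeholders):

struct dpu_hw_intr *intr;
int irq_idx;

intr = dpu_hw_intr_init(mmio, cat);
irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
if (irq_idx >= 0)
	intr->ops.enable_irq(intr, irq_idx);

/* later, from the top level interrupt handler: */
intr->ops.get_interrupt_statuses(intr);
intr->ops.dispatch_irqs(intr, vsync_cb, priv);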

@@ -0,0 +1,349 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
#define INTF_CONFIG 0x004
#define INTF_HSYNC_CTL 0x008
#define INTF_VSYNC_PERIOD_F0 0x00C
#define INTF_VSYNC_PERIOD_F1 0x010
#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
#define INTF_DISPLAY_V_START_F0 0x01C
#define INTF_DISPLAY_V_START_F1 0x020
#define INTF_DISPLAY_V_END_F0 0x024
#define INTF_DISPLAY_V_END_F1 0x028
#define INTF_ACTIVE_V_START_F0 0x02C
#define INTF_ACTIVE_V_START_F1 0x030
#define INTF_ACTIVE_V_END_F0 0x034
#define INTF_ACTIVE_V_END_F1 0x038
#define INTF_DISPLAY_HCTL 0x03C
#define INTF_ACTIVE_HCTL 0x040
#define INTF_BORDER_COLOR 0x044
#define INTF_UNDERFLOW_COLOR 0x048
#define INTF_HSYNC_SKEW 0x04C
#define INTF_POLARITY_CTL 0x050
#define INTF_TEST_CTL 0x054
#define INTF_TP_COLOR0 0x058
#define INTF_TP_COLOR1 0x05C
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
#define INTF_DEFLICKER_CONFIG 0x0F0
#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
#define INTF_PANEL_FORMAT 0x090
#define INTF_TPG_ENABLE 0x100
#define INTF_TPG_MAIN_CONTROL 0x104
#define INTF_TPG_VIDEO_CONFIG 0x108
#define INTF_TPG_COMPONENT_LIMITS 0x10C
#define INTF_TPG_RECTANGLE 0x110
#define INTF_TPG_INITIAL_VALUE 0x114
#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
#define INTF_TPG_RGB_MAPPING 0x11C
#define INTF_PROG_FETCH_START 0x170
#define INTF_PROG_ROT_START 0x174
#define INTF_MISR_CTRL 0x180
#define INTF_MISR_SIGNATURE 0x184
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->intf_count; i++) {
if ((intf == m->intf[i].id) &&
(m->intf[i].type != INTF_NONE)) {
b->base_off = addr;
b->blk_off = m->intf[i].base;
b->length = m->intf[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_INTF;
return &m->intf[i];
}
}
return ERR_PTR(-EINVAL);
}
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
const struct intf_timing_params *p,
const struct dpu_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
u32 active_h_start, active_h_end;
u32 active_v_start, active_v_end;
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
u32 panel_format;
u32 intf_cfg;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p->h_front_porch;
vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
p->v_front_porch;
display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
hsync_period) + p->hsync_skew;
display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
p->hsync_skew - 1;
if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
display_v_start += p->hsync_pulse_width + p->h_back_porch;
display_v_end -= p->h_front_porch;
}
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
if (p->width != p->xres) {
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
} else {
active_h_start = 0;
active_h_end = 0;
}
if (p->height != p->yres) {
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
} else {
active_v_start = 0;
active_v_end = 0;
}
if (active_h_end) {
active_hctl = (active_h_end << 16) | active_h_start;
intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
} else {
active_hctl = 0;
}
if (active_v_end)
intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
den_polarity = 0;
if (ctx->cap->type == INTF_HDMI) {
hsync_polarity = p->yres >= 720 ? 0 : 1;
vsync_polarity = p->yres >= 720 ? 0 : 1;
} else {
hsync_polarity = 0;
vsync_polarity = 0;
}
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(vsync_polarity << 1) | /* VSYNC Polarity */
(hsync_polarity << 0); /* HSYNC Polarity */
if (!DPU_FORMAT_IS_YUV(fmt))
panel_format = (fmt->bits[C0_G_Y] |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C2_R_Cr] << 4) |
(0x21 << 8));
else
/* Interface treats all the pixel data in RGB888 format */
panel_format = (COLOR_8BIT |
(COLOR_8BIT << 2) |
(COLOR_8BIT << 4) |
(0x21 << 8));
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
p->vsync_pulse_width * hsync_period);
DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
}
static void dpu_hw_intf_enable_timing_engine(
struct dpu_hw_intf *intf,
u8 enable)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
/* Note: Display interface select is handled in top block hw layer */
DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
}
static void dpu_hw_intf_setup_prg_fetch(
struct dpu_hw_intf *intf,
const struct intf_prog_fetch *fetch)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
int fetch_enable;
/*
* Fetch should always be outside the active lines. If the fetching
* is programmed within active region, hardware behavior is unknown.
*/
fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
if (fetch->enable) {
fetch_enable |= BIT(31);
DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
fetch->fetch_start);
} else {
fetch_enable &= ~BIT(31);
}
DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
}
static void dpu_hw_intf_get_status(
struct dpu_hw_intf *intf,
struct intf_status *s)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
if (s->is_en) {
s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
} else {
s->line_count = 0;
s->frame_count = 0;
}
}
static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
bool enable, u32 frame_count)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 config = 0;
DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
/* clear misr data */
wmb();
if (enable)
config = (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
}
static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
}
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
if (!intf)
return 0;
c = &intf->hw;
return DPU_REG_READ(c, INTF_LINE_COUNT);
}
static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
unsigned long cap)
{
ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
ops->setup_misr = dpu_hw_intf_setup_misr;
ops->collect_misr = dpu_hw_intf_collect_misr;
ops->get_line_count = dpu_hw_intf_get_line_count;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _intf_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
pr_err("failed to create dpu_hw_intf %d\n", idx);
return ERR_PTR(-EINVAL);
}
/* Assign ops */
c->idx = idx;
c->cap = cfg;
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
{
if (intf)
dpu_hw_blk_destroy(&intf->base);
kfree(intf);
}

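For reference, the call sequence these ops imply for bringing up a video-mode timing engine is sketched below. This is an illustrative sketch only, not part of the patch: mmio, catalog and fmt stand in for the mapped MDP register space, the parsed hardware catalog and a resolved dpu_format, and the timing values are placeholders.

/* Illustrative sketch only; not part of this patch. mmio, catalog and
 * fmt are assumed to come from the DPU KMS setup code, and the timing
 * values below are placeholders.
 */
static int example_intf_bringup(void __iomem *mmio,
				struct dpu_mdss_cfg *catalog,
				const struct dpu_format *fmt)
{
	struct intf_timing_params timing = {
		.width = 1080,
		.height = 1920,
		.xres = 1080,
		.yres = 1920,
		.h_front_porch = 88,
		.h_back_porch = 132,
		.hsync_pulse_width = 8,
		.v_front_porch = 9,
		.v_back_porch = 8,
		.vsync_pulse_width = 4,
	};
	struct dpu_hw_intf *intf;

	intf = dpu_hw_intf_init(INTF_1, mmio, catalog);
	if (IS_ERR(intf))
		return PTR_ERR(intf);

	intf->ops.setup_timing_gen(intf, &timing, fmt);
	intf->ops.enable_timing(intf, 1);

	/* ... and on teardown ... */
	intf->ops.enable_timing(intf, 0);
	dpu_hw_intf_destroy(intf);
	return 0;
}
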
@@ -0,0 +1,128 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_INTF_H
#define _DPU_HW_INTF_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
struct dpu_hw_intf;
/* intf timing settings */
struct intf_timing_params {
u32 width; /* active width */
u32 height; /* active height */
u32 xres; /* Display panel width */
u32 yres; /* Display panel height */
u32 h_back_porch;
u32 h_front_porch;
u32 v_back_porch;
u32 v_front_porch;
u32 hsync_pulse_width;
u32 vsync_pulse_width;
u32 hsync_polarity;
u32 vsync_polarity;
u32 border_clr;
u32 underflow_clr;
u32 hsync_skew;
};
struct intf_prog_fetch {
u8 enable;
/* vsync counter for the front porch pixel line */
u32 fetch_start;
};
struct intf_status {
u8 is_en; /* interface timing engine is enabled or not */
u32 frame_count; /* frame count since timing engine enabled */
u32 line_count; /* current line count including blanking */
};
/**
* struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @ setup_timing_gen : programs the timing engine
* @ setup_prg_fetch : enables/disables the programmable fetch logic
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
* @ setup_misr: enables/disables MISR in HW register
* @ collect_misr: reads and stores MISR data from HW register
* @ get_line_count: reads current vertical line counter
*/
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct intf_timing_params *p,
const struct dpu_format *fmt);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct intf_prog_fetch *fetch);
void (*enable_timing)(struct dpu_hw_intf *intf,
u8 enable);
void (*get_status)(struct dpu_hw_intf *intf,
struct intf_status *status);
void (*setup_misr)(struct dpu_hw_intf *intf,
bool enable, u32 frame_count);
u32 (*collect_misr)(struct dpu_hw_intf *intf);
u32 (*get_line_count)(struct dpu_hw_intf *intf);
};
struct dpu_hw_intf {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* intf */
enum dpu_intf idx;
const struct dpu_intf_cfg *cap;
const struct dpu_mdss_cfg *mdss;
/* ops */
struct dpu_hw_intf_ops ops;
};
/**
* to_dpu_hw_intf - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_intf, base);
}
/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
*/
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_intf_destroy(): Destroys INTF driver context
* @intf: Pointer to INTF driver context
*/
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
#endif /*_DPU_HW_INTF_H */

@@ -0,0 +1,261 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
#define LM_BORDER_COLOR_0 0x08
#define LM_BORDER_COLOR_1 0x010
/* These register are offset to mixer base + stage base */
#define LM_BLEND0_OP 0x00
#define LM_BLEND0_CONST_ALPHA 0x04
#define LM_FG_COLOR_FILL_COLOR_0 0x08
#define LM_FG_COLOR_FILL_COLOR_1 0x0C
#define LM_FG_COLOR_FILL_SIZE 0x10
#define LM_FG_COLOR_FILL_XY 0x14
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
#define LM_MISR_CTRL 0x310
#define LM_MISR_SIGNATURE 0x314
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->mixer_count; i++) {
if (mixer == m->mixer[i].id) {
b->base_off = addr;
b->blk_off = m->mixer[i].base;
b->length = m->mixer[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_LM;
return &m->mixer[i];
}
}
return ERR_PTR(-ENOMEM);
}
/**
* _stage_offset(): returns the relative offset of the blend registers
* for the stage to be setup
* @c: mixer ctx contains the mixer to be programmed
* @stage: stage index to setup
*/
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
int rc;
if (stage == DPU_STAGE_BASE)
rc = -EINVAL;
else if (stage <= sblk->maxblendstages)
rc = sblk->blendstage_base[stage - DPU_STAGE_0];
else
rc = -EINVAL;
return rc;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
struct dpu_hw_mixer_cfg *mixer)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 outsize;
u32 op_mode;
op_mode = DPU_REG_READ(c, LM_OP_MODE);
outsize = mixer->out_height << 16 | mixer->out_width;
DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
/* SPLIT_LEFT_RIGHT */
if (mixer->right_mixer)
op_mode |= BIT(31);
else
op_mode &= ~BIT(31);
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
if (border_en) {
DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
(color->color_0 & 0xFFF) |
((color->color_1 & 0xFFF) << 0x10));
DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
(color->color_2 & 0xFFF) |
((color->color_3 & 0xFFF) << 0x10));
}
}
static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int stage_off;
u32 const_alpha;
if (stage == DPU_STAGE_BASE)
return;
stage_off = _stage_offset(ctx, stage);
if (WARN_ON(stage_off < 0))
return;
const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int stage_off;
if (stage == DPU_STAGE_BASE)
return;
stage_off = _stage_offset(ctx, stage);
if (WARN_ON(stage_off < 0))
return;
DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
}
static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
uint32_t mixer_op_mode)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int op_mode;
/* read the existing op_mode configuration */
op_mode = DPU_REG_READ(c, LM_OP_MODE);
op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
void *cfg)
{
}
static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
bool enable, u32 frame_count)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 config = 0;
DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
/* clear misr data */
wmb();
if (enable)
config = (frame_count & MISR_FRAME_COUNT_MASK) |
MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
DPU_REG_WRITE(c, LM_MISR_CTRL, config);
}
static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, LM_MISR_SIGNATURE);
}
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
{
ops->setup_mixer_out = dpu_hw_lm_setup_out;
if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_gc = dpu_hw_lm_gc;
ops->setup_misr = dpu_hw_lm_setup_misr;
ops->collect_misr = dpu_hw_lm_collect_misr;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _lm_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
/* Assign ops */
c->idx = idx;
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
{
if (lm)
dpu_hw_blk_destroy(&lm->base);
kfree(lm);
}

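A usage note on the dual-LM data flows: each mixer is programmed with half of the output width, and the right-hand mixer additionally sets the SPLIT_LEFT_RIGHT bit through right_mixer. A minimal sketch, assuming lm_left and lm_right were obtained from dpu_hw_lm_init() and the mode size is a placeholder:

/* Sketch only: lm_left/lm_right are assumed to come from
 * dpu_hw_lm_init(); the mode size is a placeholder.
 */
static void example_dual_lm_out(struct dpu_hw_mixer *lm_left,
				struct dpu_hw_mixer *lm_right)
{
	struct dpu_hw_mixer_cfg cfg = {
		.out_width = 2160 / 2,	/* each LM drives half the panel */
		.out_height = 1080,
		.right_mixer = false,
	};

	lm_left->ops.setup_mixer_out(lm_left, &cfg);

	cfg.right_mixer = true;		/* sets SPLIT_LEFT_RIGHT, BIT(31) */
	lm_right->ops.setup_mixer_out(lm_right, &cfg);
}
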
@@ -0,0 +1,122 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_LM_H
#define _DPU_HW_LM_H
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
struct dpu_hw_mixer;
struct dpu_hw_mixer_cfg {
u32 out_width;
u32 out_height;
bool right_mixer;
int flags;
};
struct dpu_hw_color3_cfg {
u8 keep_fg[DPU_STAGE_MAX];
};
/**
*
* struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_lm_ops {
/*
* Sets up mixer output width and height
* and border color if enabled
*/
void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
struct dpu_hw_mixer_cfg *cfg);
/*
* Alpha blending configuration
* for the specified stage
*/
void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
/*
* Alpha color component selection from either fg or bg
*/
void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
/**
* setup_border_color : enable/disable border color
*/
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
/**
* setup_gc : enable/disable gamma correction feature
*/
void (*setup_gc)(struct dpu_hw_mixer *mixer,
void *cfg);
/* setup_misr: enables/disables MISR in HW register */
void (*setup_misr)(struct dpu_hw_mixer *ctx,
bool enable, u32 frame_count);
/* collect_misr: reads and stores MISR data from HW register */
u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
};
struct dpu_hw_mixer {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* lm */
enum dpu_lm idx;
const struct dpu_lm_cfg *cap;
const struct dpu_mdp_cfg *mdp;
const struct dpu_ctl_cfg *ctl;
/* ops */
struct dpu_hw_lm_ops ops;
/* store mixer info specific to display */
struct dpu_hw_mixer_cfg cfg;
};
/**
* to_dpu_hw_mixer - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_mixer, base);
}
/**
* dpu_hw_lm_init(): Initializes the mixer hw driver object.
* should be called once before accessing every mixer.
* @idx: mixer index for which driver object is required
* @addr: mapped register io address of MDP
* @m : pointer to mdss catalog data
*/
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_lm_destroy(): Destroys layer mixer driver context
* @lm: Pointer to LM driver context
*/
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
#endif /*_DPU_HW_LM_H */

@@ -0,0 +1,465 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_MDSS_H
#define _DPU_HW_MDSS_H
#include <linux/kernel.h>
#include <linux/err.h>
#include "msm_drv.h"
#define DPU_DBG_NAME "dpu"
#define DPU_NONE 0
#ifndef DPU_CSC_MATRIX_COEFF_SIZE
#define DPU_CSC_MATRIX_COEFF_SIZE 9
#endif
#ifndef DPU_CSC_CLAMP_SIZE
#define DPU_CSC_CLAMP_SIZE 6
#endif
#ifndef DPU_CSC_BIAS_SIZE
#define DPU_CSC_BIAS_SIZE 3
#endif
#ifndef DPU_MAX_PLANES
#define DPU_MAX_PLANES 4
#endif
#define PIPES_PER_STAGE 2
#ifndef DPU_MAX_DE_CURVES
#define DPU_MAX_DE_CURVES 3
#endif
enum dpu_format_flags {
DPU_FORMAT_FLAG_YUV_BIT,
DPU_FORMAT_FLAG_DX_BIT,
DPU_FORMAT_FLAG_COMPRESSED_BIT,
DPU_FORMAT_FLAG_BIT_MAX,
};
#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
#define DPU_FORMAT_IS_YUV(X) \
(test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
#define DPU_FORMAT_IS_DX(X) \
(test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
#define DPU_FORMAT_IS_TILE(X) \
(((X)->fetch_mode == DPU_FETCH_UBWC) && \
!test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
#define DPU_FORMAT_IS_UBWC(X) \
(((X)->fetch_mode == DPU_FETCH_UBWC) && \
test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
#define DPU_VSYNC0_SOURCE_GPIO 0
#define DPU_VSYNC1_SOURCE_GPIO 1
#define DPU_VSYNC2_SOURCE_GPIO 2
#define DPU_VSYNC_SOURCE_INTF_0 3
#define DPU_VSYNC_SOURCE_INTF_1 4
#define DPU_VSYNC_SOURCE_INTF_2 5
#define DPU_VSYNC_SOURCE_INTF_3 6
#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
enum dpu_hw_blk_type {
DPU_HW_BLK_TOP = 0,
DPU_HW_BLK_SSPP,
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_CDM,
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
DPU_HW_BLK_MAX,
};
enum dpu_mdp {
MDP_TOP = 0x1,
MDP_MAX,
};
enum dpu_sspp {
SSPP_NONE,
SSPP_VIG0,
SSPP_VIG1,
SSPP_VIG2,
SSPP_VIG3,
SSPP_RGB0,
SSPP_RGB1,
SSPP_RGB2,
SSPP_RGB3,
SSPP_DMA0,
SSPP_DMA1,
SSPP_DMA2,
SSPP_DMA3,
SSPP_CURSOR0,
SSPP_CURSOR1,
SSPP_MAX
};
enum dpu_sspp_type {
SSPP_TYPE_VIG,
SSPP_TYPE_RGB,
SSPP_TYPE_DMA,
SSPP_TYPE_CURSOR,
SSPP_TYPE_MAX
};
enum dpu_lm {
LM_0 = 1,
LM_1,
LM_2,
LM_3,
LM_4,
LM_5,
LM_6,
LM_MAX
};
enum dpu_stage {
DPU_STAGE_BASE = 0,
DPU_STAGE_0,
DPU_STAGE_1,
DPU_STAGE_2,
DPU_STAGE_3,
DPU_STAGE_4,
DPU_STAGE_5,
DPU_STAGE_6,
DPU_STAGE_7,
DPU_STAGE_8,
DPU_STAGE_9,
DPU_STAGE_10,
DPU_STAGE_MAX
};
enum dpu_dspp {
DSPP_0 = 1,
DSPP_1,
DSPP_2,
DSPP_3,
DSPP_MAX
};
enum dpu_ds {
DS_TOP,
DS_0,
DS_1,
DS_MAX
};
enum dpu_ctl {
CTL_0 = 1,
CTL_1,
CTL_2,
CTL_3,
CTL_4,
CTL_MAX
};
enum dpu_cdm {
CDM_0 = 1,
CDM_1,
CDM_MAX
};
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
PINGPONG_2,
PINGPONG_3,
PINGPONG_4,
PINGPONG_S0,
PINGPONG_MAX
};
enum dpu_intf {
INTF_0 = 1,
INTF_1,
INTF_2,
INTF_3,
INTF_4,
INTF_5,
INTF_6,
INTF_MAX
};
enum dpu_intf_type {
INTF_NONE = 0x0,
INTF_DSI = 0x1,
INTF_HDMI = 0x3,
INTF_LCDC = 0x5,
INTF_EDP = 0x9,
INTF_DP = 0xa,
INTF_TYPE_MAX,
/* virtual interfaces */
INTF_WB = 0x100,
};
enum dpu_intf_mode {
INTF_MODE_NONE = 0,
INTF_MODE_CMD,
INTF_MODE_VIDEO,
INTF_MODE_WB_BLOCK,
INTF_MODE_WB_LINE,
INTF_MODE_MAX
};
enum dpu_wb {
WB_0 = 1,
WB_1,
WB_2,
WB_3,
WB_MAX
};
enum dpu_ad {
AD_0 = 0x1,
AD_1,
AD_MAX
};
enum dpu_cwb {
CWB_0 = 0x1,
CWB_1,
CWB_2,
CWB_3,
CWB_MAX
};
enum dpu_wd_timer {
WD_TIMER_0 = 0x1,
WD_TIMER_1,
WD_TIMER_2,
WD_TIMER_3,
WD_TIMER_4,
WD_TIMER_5,
WD_TIMER_MAX
};
enum dpu_vbif {
VBIF_0,
VBIF_1,
VBIF_MAX,
VBIF_RT = VBIF_0,
VBIF_NRT = VBIF_1
};
enum dpu_iommu_domain {
DPU_IOMMU_DOMAIN_UNSECURE,
DPU_IOMMU_DOMAIN_SECURE,
DPU_IOMMU_DOMAIN_MAX
};
/**
* DPU HW component order color map
*/
enum {
C0_G_Y = 0,
C1_B_Cb = 1,
C2_R_Cr = 2,
C3_ALPHA = 3
};
/**
* enum dpu_plane_type - defines how the color component pixel packing
* @DPU_PLANE_INTERLEAVED : Color components in single plane
* @DPU_PLANE_PLANAR : Color component in separate planes
* @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
*/
enum dpu_plane_type {
DPU_PLANE_INTERLEAVED,
DPU_PLANE_PLANAR,
DPU_PLANE_PSEUDO_PLANAR,
};
/**
* enum dpu_chroma_samp_type - chroma sub-sampling type
* @DPU_CHROMA_RGB : No chroma subsampling
* @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
* @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
* @DPU_CHROMA_420 : 420 subsampling
*/
enum dpu_chroma_samp_type {
DPU_CHROMA_RGB,
DPU_CHROMA_H2V1,
DPU_CHROMA_H1V2,
DPU_CHROMA_420
};
/**
* dpu_fetch_type - defines how DPU HW fetches data
* @DPU_FETCH_LINEAR : fetch is line by line
* @DPU_FETCH_TILE : fetches data in Z order from a tile
* @DPU_FETCH_UBWC : fetch and decompress data
*/
enum dpu_fetch_type {
DPU_FETCH_LINEAR,
DPU_FETCH_TILE,
DPU_FETCH_UBWC
};
/**
* Values of the enum are chosen to fit the number of bits
* expected by the HW programming.
*/
enum {
COLOR_ALPHA_1BIT = 0,
COLOR_ALPHA_4BIT = 1,
COLOR_4BIT = 0,
COLOR_5BIT = 1, /* No 5-bit Alpha */
COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
};
/**
* enum dpu_3d_blend_mode
* Describes how the 3d data is blended
* @BLEND_3D_NONE : 3d blending not enabled
* @BLEND_3D_FRAME_INT : Frame interleaving
* @BLEND_3D_H_ROW_INT : Horizontal row interleaving
* @BLEND_3D_V_ROW_INT : vertical row interleaving
* @BLEND_3D_COL_INT : column interleaving
* @BLEND_3D_MAX :
*/
enum dpu_3d_blend_mode {
BLEND_3D_NONE = 0,
BLEND_3D_FRAME_INT,
BLEND_3D_H_ROW_INT,
BLEND_3D_V_ROW_INT,
BLEND_3D_COL_INT,
BLEND_3D_MAX
};
/**
* struct dpu_format - defines the format configuration which
* allows DPU HW to correctly fetch and decode the format
* @base: base msm_format structure containing fourcc code
* @fetch_planes: how the color components are packed in pixel format
* @element: element color ordering
* @bits: element bit widths
* @chroma_sample: chroma sub-sampling type
* @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
* @unpack_tight: 0 for loose, 1 for tight
* @unpack_count: 0 = 1 component, 1 = 2 components
* @bpp: bytes per pixel
* @alpha_enable: whether the format has an alpha channel
* @num_planes: number of planes (including meta data planes)
* @fetch_mode: linear, tiled, or ubwc hw fetch behavior
* @is_yuv: is format a yuv variant
* @flag: usage bit flags
* @tile_width: format tile width
* @tile_height: format tile height
*/
struct dpu_format {
struct msm_format base;
enum dpu_plane_type fetch_planes;
u8 element[DPU_MAX_PLANES];
u8 bits[DPU_MAX_PLANES];
enum dpu_chroma_samp_type chroma_sample;
u8 unpack_align_msb;
u8 unpack_tight;
u8 unpack_count;
u8 bpp;
u8 alpha_enable;
u8 num_planes;
enum dpu_fetch_type fetch_mode;
DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
u16 tile_width;
u16 tile_height;
};
#define to_dpu_format(x) container_of(x, struct dpu_format, base)
/**
* struct dpu_hw_fmt_layout - format information of the source pixel data
* @format: pixel format parameters
* @num_planes: number of planes (including meta data planes)
* @width: image width
* @height: image height
* @total_size: total size in bytes
* @plane_addr: address of each plane
* @plane_size: length of each plane
* @plane_pitch: pitch of each plane
*/
struct dpu_hw_fmt_layout {
const struct dpu_format *format;
uint32_t num_planes;
uint32_t width;
uint32_t height;
uint32_t total_size;
uint32_t plane_addr[DPU_MAX_PLANES];
uint32_t plane_size[DPU_MAX_PLANES];
uint32_t plane_pitch[DPU_MAX_PLANES];
};
struct dpu_csc_cfg {
/* matrix coefficients in S15.16 format */
uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
};
/**
* struct dpu_mdss_color - mdss color description
* color 0 : green
* color 1 : blue
* color 2 : red
* color 3 : alpha
*/
struct dpu_mdss_color {
u32 color_0;
u32 color_1;
u32 color_2;
u32 color_3;
};
/*
* Define bit masks for h/w logging.
*/
#define DPU_DBG_MASK_NONE (1 << 0)
#define DPU_DBG_MASK_CDM (1 << 1)
#define DPU_DBG_MASK_INTF (1 << 2)
#define DPU_DBG_MASK_LM (1 << 3)
#define DPU_DBG_MASK_CTL (1 << 4)
#define DPU_DBG_MASK_PINGPONG (1 << 5)
#define DPU_DBG_MASK_SSPP (1 << 6)
#define DPU_DBG_MASK_WB (1 << 7)
#define DPU_DBG_MASK_TOP (1 << 8)
#define DPU_DBG_MASK_VBIF (1 << 9)
#define DPU_DBG_MASK_ROT (1 << 10)
#endif /* _DPU_HW_MDSS_H */

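To illustrate how the DPU_BLEND_* masks above compose into the blend_op word taken by the layer mixer's setup_blend_config, here is a hedged sketch of typical choices; the actual policy is decided by the CRTC atomic code, not by this header.

/* Sketch: plausible blend_op values assuming standard coverage and
 * premultiplied alpha conventions; the real policy is chosen by the
 * CRTC atomic code.
 */
static u32 example_blend_op(bool has_alpha, bool premultiplied)
{
	if (!has_alpha)		/* opaque: constant FG over constant BG */
		return DPU_BLEND_FG_ALPHA_FG_CONST |
		       DPU_BLEND_BG_ALPHA_BG_CONST;

	if (premultiplied)	/* FG pixels already scaled by alpha */
		return DPU_BLEND_FG_ALPHA_FG_CONST |
		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
		       DPU_BLEND_BG_INV_ALPHA;

	/* coverage: scale FG by its pixel alpha and BG by (1 - alpha) */
	return DPU_BLEND_FG_ALPHA_FG_PIXEL |
	       DPU_BLEND_BG_ALPHA_FG_PIXEL |
	       DPU_BLEND_BG_INV_ALPHA;
}
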
@@ -0,0 +1,250 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/iopoll.h>
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define PP_TEAR_CHECK_EN 0x000
#define PP_SYNC_CONFIG_VSYNC 0x004
#define PP_SYNC_CONFIG_HEIGHT 0x008
#define PP_SYNC_WRCOUNT 0x00C
#define PP_VSYNC_INIT_VAL 0x010
#define PP_INT_COUNT_VAL 0x014
#define PP_SYNC_THRESH 0x018
#define PP_START_POS 0x01C
#define PP_RD_PTR_IRQ 0x020
#define PP_WR_PTR_IRQ 0x024
#define PP_OUT_LINE_COUNT 0x028
#define PP_LINE_COUNT 0x02C
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->pingpong_count; i++) {
if (pp == m->pingpong[i].id) {
b->base_off = addr;
b->blk_off = m->pingpong[i].base;
b->length = m->pingpong[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_PINGPONG;
return &m->pingpong[i];
}
}
return ERR_PTR(-EINVAL);
}
static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *te)
{
struct dpu_hw_blk_reg_map *c;
int cfg;
if (!pp || !te)
return -EINVAL;
c = &pp->hw;
cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (te->hw_vsync_mode)
cfg |= BIT(20);
cfg |= te->vsync_count;
DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
DPU_REG_WRITE(c, PP_SYNC_THRESH,
((te->sync_threshold_continue << 16) |
te->sync_threshold_start));
DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
return 0;
}
static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c;
u32 val;
int rc;
if (!pp)
return -EINVAL;
c = &pp->hw;
rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
val, (val & 0xffff) >= 1, 10, timeout_us);
return rc;
}
static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
{
struct dpu_hw_blk_reg_map *c;
if (!pp)
return -EINVAL;
c = &pp->hw;
DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
return 0;
}
static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
bool enable_external_te)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
u32 cfg;
int orig;
if (!pp)
return -EINVAL;
c = &pp->hw;
cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
orig = (bool)(cfg & BIT(20));
if (enable_external_te)
cfg |= BIT(20);
else
cfg &= ~BIT(20);
DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
return orig;
}
static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
struct dpu_hw_pp_vsync_info *info)
{
struct dpu_hw_blk_reg_map *c;
u32 val;
if (!pp || !info)
return -EINVAL;
c = &pp->hw;
val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
info->rd_ptr_init_val = val & 0xffff;
val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
info->rd_ptr_line_count = val & 0xffff;
val = DPU_REG_READ(c, PP_LINE_COUNT);
info->wr_ptr_line_count = val & 0xffff;
return 0;
}
static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
{
struct dpu_hw_blk_reg_map *c = &pp->hw;
u32 height, init;
u32 line = 0xFFFF;
if (!pp)
return 0;
c = &pp->hw;
init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
goto line_count_exit;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
if (line < init)
line += (0xFFFF - init);
else
line -= init;
line_count_exit:
return line;
}
static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
const struct dpu_pingpong_cfg *hw_cap)
{
ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
ops->enable_tearcheck = dpu_hw_pp_enable_te;
ops->connect_external_te = dpu_hw_pp_connect_external_te;
ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
ops->get_line_count = dpu_hw_pp_get_line_count;
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _pingpong_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
c->idx = idx;
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
return c;
blk_init_error:
kzfree(c);
return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
{
if (pp)
dpu_hw_blk_destroy(&pp->base);
kfree(pp);
}

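For command-mode panels, the tear check block is programmed before the interface is enabled. A sketch of the sequence, with placeholder values for a 1080x1920@60 panel; the fields are documented with struct dpu_hw_tear_check in the header that follows.

/* Sketch only: pp is assumed to come from dpu_hw_pingpong_init() and
 * the panel numbers below are placeholders.
 */
static int example_tearcheck(struct dpu_hw_pingpong *pp)
{
	struct dpu_hw_tear_check tc = {
		/* vsync clk (assumed 19.2 MHz) / (lines * fps) */
		.vsync_count = 19200000 / (1920 * 60),
		.sync_cfg_height = 0xfff0,
		.vsync_init_val = 1920,
		.sync_threshold_start = 4,
		.sync_threshold_continue = 4,
		.start_pos = 1920,
		.rd_ptr_irq = 1920 + 1,
		.hw_vsync_mode = 1,
	};
	int rc;

	rc = pp->ops.setup_tearcheck(pp, &tc);
	if (rc)
		return rc;

	return pp->ops.enable_tearcheck(pp, true);
}
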
@@ -0,0 +1,136 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_PINGPONG_H
#define _DPU_HW_PINGPONG_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
struct dpu_hw_pingpong;
struct dpu_hw_tear_check {
/*
* Ratio of the MDP VSYNC clock frequency (Hz) to the refresh
* rate, divided by the number of lines
*/
u32 vsync_count;
u32 sync_cfg_height;
u32 vsync_init_val;
u32 sync_threshold_start;
u32 sync_threshold_continue;
u32 start_pos;
u32 rd_ptr_irq;
u8 hw_vsync_mode;
};
struct dpu_hw_pp_vsync_info {
u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
};
/**
*
* struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
* Assumption is these functions will be called after clocks are enabled
* @setup_tearcheck : program tear check values
* @enable_tearcheck : enables tear check
* @get_vsync_info : retrieves timing info of the panel
* @get_line_count: obtain current vertical line counter
*/
struct dpu_hw_pingpong_ops {
/**
* enables vsync generation and sets up init value of
* read pointer and programs the tear check configuration
*/
int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
struct dpu_hw_tear_check *cfg);
/**
* enables tear check block
*/
int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
bool enable);
/**
* read, modify, write to either set or clear listening to external TE
* @Return: 1 if TE was originally connected, 0 if not, or -ERROR
*/
int (*connect_external_te)(struct dpu_hw_pingpong *pp,
bool enable_external_te);
/**
* provides the programmed and current
* line_count
*/
int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
struct dpu_hw_pp_vsync_info *info);
/**
* poll until write pointer transmission starts
* @Return: 0 on success, -ETIMEDOUT on timeout
*/
int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
/**
* Obtain current vertical line counter
*/
u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
};
struct dpu_hw_pingpong {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* pingpong */
enum dpu_pingpong idx;
const struct dpu_pingpong_cfg *caps;
/* ops */
struct dpu_hw_pingpong_ops ops;
};
/**
* dpu_hw_pingpong - convert base object dpu_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pingpong, base);
}
/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
* Returns: Error code or allocated dpu_hw_pingpong context
*/
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
struct dpu_mdss_cfg *m);
/**
* dpu_hw_pingpong_destroy - destroys pingpong driver context
* should be called to free the context
* @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
*/
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
#endif /*_DPU_HW_PINGPONG_H */

@@ -0,0 +1,753 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
/* DPU_SSPP_SRC */
#define SSPP_SRC_SIZE 0x00
#define SSPP_SRC_XY 0x08
#define SSPP_OUT_SIZE 0x0c
#define SSPP_OUT_XY 0x10
#define SSPP_SRC0_ADDR 0x14
#define SSPP_SRC1_ADDR 0x18
#define SSPP_SRC2_ADDR 0x1C
#define SSPP_SRC3_ADDR 0x20
#define SSPP_SRC_YSTRIDE0 0x24
#define SSPP_SRC_YSTRIDE1 0x28
#define SSPP_SRC_FORMAT 0x30
#define SSPP_SRC_UNPACK_PATTERN 0x34
#define SSPP_SRC_OP_MODE 0x38
/* SSPP_MULTIRECT*/
#define SSPP_SRC_SIZE_REC1 0x16C
#define SSPP_SRC_XY_REC1 0x168
#define SSPP_OUT_SIZE_REC1 0x160
#define SSPP_OUT_XY_REC1 0x164
#define SSPP_SRC_FORMAT_REC1 0x174
#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
#define SSPP_SRC_OP_MODE_REC1 0x17C
#define SSPP_MULTIRECT_OPMODE 0x170
#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
#define SSPP_EXCL_REC_SIZE_REC1 0x184
#define SSPP_EXCL_REC_XY_REC1 0x188
#define MDSS_MDP_OP_DEINTERLACE BIT(22)
#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
#define MDSS_MDP_OP_IGC_EN BIT(16)
#define MDSS_MDP_OP_FLIP_UD BIT(14)
#define MDSS_MDP_OP_FLIP_LR BIT(13)
#define MDSS_MDP_OP_BWC_EN BIT(0)
#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
#define SSPP_SRC_CONSTANT_COLOR 0x3c
#define SSPP_EXCL_REC_CTL 0x40
#define SSPP_UBWC_STATIC_CTRL 0x44
#define SSPP_FETCH_CONFIG 0x048
#define SSPP_DANGER_LUT 0x60
#define SSPP_SAFE_LUT 0x64
#define SSPP_CREQ_LUT 0x68
#define SSPP_QOS_CTRL 0x6C
#define SSPP_DECIMATION_CONFIG 0xB4
#define SSPP_SRC_ADDR_SW_STATUS 0x70
#define SSPP_CREQ_LUT_0 0x74
#define SSPP_CREQ_LUT_1 0x78
#define SSPP_SW_PIX_EXT_C0_LR 0x100
#define SSPP_SW_PIX_EXT_C0_TB 0x104
#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
#define SSPP_SW_PIX_EXT_C3_LR 0x120
#define SSPP_SW_PIX_EXT_C3_TB 0x124
#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
#define SSPP_TRAFFIC_SHAPER 0x130
#define SSPP_CDP_CNTL 0x134
#define SSPP_UBWC_ERROR_STATUS 0x138
#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
#define SSPP_TRAFFIC_SHAPER_REC1 0x158
#define SSPP_EXCL_REC_SIZE 0x1B4
#define SSPP_EXCL_REC_XY 0x1B8
#define SSPP_VIG_OP_MODE 0x0
#define SSPP_VIG_CSC_10_OP_MODE 0x0
#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
/* SSPP_QOS_CTRL */
#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
/* DPU_SSPP_SCALER_QSEED2 */
#define SCALE_CONFIG 0x04
#define COMP0_3_PHASE_STEP_X 0x10
#define COMP0_3_PHASE_STEP_Y 0x14
#define COMP1_2_PHASE_STEP_X 0x18
#define COMP1_2_PHASE_STEP_Y 0x1c
#define COMP0_3_INIT_PHASE_X 0x20
#define COMP0_3_INIT_PHASE_Y 0x24
#define COMP1_2_INIT_PHASE_X 0x28
#define COMP1_2_INIT_PHASE_Y 0x2C
#define VIG_0_QSEED2_SHARP 0x30
/*
* Definitions for ViG op modes
*/
#define VIG_OP_CSC_DST_DATAFMT BIT(19)
#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
#define VIG_OP_CSC_EN BIT(17)
#define VIG_OP_MEM_PROT_CONT BIT(15)
#define VIG_OP_MEM_PROT_VAL BIT(14)
#define VIG_OP_MEM_PROT_SAT BIT(13)
#define VIG_OP_MEM_PROT_HUE BIT(12)
#define VIG_OP_HIST BIT(8)
#define VIG_OP_SKY_COL BIT(7)
#define VIG_OP_FOIL BIT(6)
#define VIG_OP_SKIN_COL BIT(5)
#define VIG_OP_PA_EN BIT(4)
#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
#define VIG_OP_MEM_PROT_BLEND BIT(1)
/*
* Definitions for CSC 10 op modes
*/
#define VIG_CSC_10_SRC_DATAFMT BIT(1)
#define VIG_CSC_10_EN BIT(0)
#define CSC_10BIT_OFFSET 4
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
int rc = 0;
const struct dpu_sspp_sub_blks *sblk;
if (!ctx)
return -EINVAL;
sblk = ctx->cap->sblk;
switch (s_id) {
case DPU_SSPP_SRC:
*idx = sblk->src_blk.base;
break;
case DPU_SSPP_SCALER_QSEED2:
case DPU_SSPP_SCALER_QSEED3:
case DPU_SSPP_SCALER_RGB:
*idx = sblk->scaler_blk.base;
break;
case DPU_SSPP_CSC:
case DPU_SSPP_CSC_10BIT:
*idx = sblk->csc_blk.base;
break;
default:
rc = -EINVAL;
}
return rc;
}
static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
enum dpu_sspp_multirect_index index,
enum dpu_sspp_multirect_mode mode)
{
u32 mode_mask;
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (index == DPU_SSPP_RECT_SOLO) {
/*
* If the rect index is RECT_SOLO, no virtual plane can share
* this SSPP id, so disable multirect.
*/
mode_mask = 0;
} else {
mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
mode_mask |= index;
if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
mode_mask |= BIT(2);
else
mode_mask &= ~BIT(2);
}
DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
}
static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
u32 mask, u8 en)
{
u32 idx;
u32 opmode;
if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
return;
opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
if (en)
opmode |= mask;
else
opmode &= ~mask;
DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
}
static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
u32 mask, u8 en)
{
u32 idx;
u32 opmode;
if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
return;
opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
if (en)
opmode |= mask;
else
opmode &= ~mask;
DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
}
/* Setup source pixel format, flip and fetch configuration */
static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
const struct dpu_format *fmt, u32 flags,
enum dpu_sspp_multirect_index rect_mode)
{
struct dpu_hw_blk_reg_map *c;
u32 chroma_samp, unpack, src_format;
u32 opmode = 0;
u32 fast_clear = 0;
u32 op_mode_off, unpack_pat_off, format_off;
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
return;
if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
op_mode_off = SSPP_SRC_OP_MODE;
unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
format_off = SSPP_SRC_FORMAT;
} else {
op_mode_off = SSPP_SRC_OP_MODE_REC1;
unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
format_off = SSPP_SRC_FORMAT_REC1;
}
c = &ctx->hw;
opmode = DPU_REG_READ(c, op_mode_off + idx);
opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
if (flags & DPU_SSPP_FLIP_LR)
opmode |= MDSS_MDP_OP_FLIP_LR;
if (flags & DPU_SSPP_FLIP_UD)
opmode |= MDSS_MDP_OP_FLIP_UD;
chroma_samp = fmt->chroma_sample;
if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
if (chroma_samp == DPU_CHROMA_H2V1)
chroma_samp = DPU_CHROMA_H1V2;
else if (chroma_samp == DPU_CHROMA_H1V2)
chroma_samp = DPU_CHROMA_H2V1;
}
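/* pack chroma sampling, plane layout and per-component bit widths */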
src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
if (flags & DPU_SSPP_ROT_90)
src_format |= BIT(11); /* ROT90 */
if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
if (flags & DPU_SSPP_SOLID_FILL)
src_format |= BIT(22);
unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
src_format |= ((fmt->unpack_count - 1) << 12) |
(fmt->unpack_tight << 17) |
(fmt->unpack_align_msb << 18) |
((fmt->bpp - 1) << 9);
if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
if (DPU_FORMAT_IS_UBWC(fmt))
opmode |= MDSS_MDP_OP_BWC_EN;
src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
DPU_FETCH_CONFIG_RESET_VALUE |
ctx->mdp->highest_bank_bit << 18);
if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->mdp->ubwc_swizzle) |
(ctx->mdp->highest_bank_bit << 4));
}
}
opmode |= MDSS_MDP_OP_PE_OVERRIDE;
/* if this is YUV pixel format, enable CSC */
if (DPU_FORMAT_IS_YUV(fmt))
src_format |= BIT(15);
if (DPU_FORMAT_IS_DX(fmt))
src_format |= BIT(14);
/* update scaler opmode, if appropriate */
if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
DPU_FORMAT_IS_YUV(fmt));
else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
_sspp_setup_csc10_opmode(ctx,
VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
DPU_FORMAT_IS_YUV(fmt));
DPU_REG_WRITE(c, format_off + idx, src_format);
DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
DPU_REG_WRITE(c, op_mode_off + idx, opmode);
/* clear previous UBWC error */
DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
}
static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
struct dpu_hw_pixel_ext *pe_ext)
{
struct dpu_hw_blk_reg_map *c;
u8 color;
u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
const u32 bytemask = 0xff;
const u32 shortmask = 0xffff;
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
return;
c = &ctx->hw;
/* program SW pixel extension override for all pipes*/
for (color = 0; color < DPU_MAX_PLANES; color++) {
/* color 2 has the same set of registers as color 1 */
if (color == 2)
continue;
lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
((pe_ext->right_rpt[color] & bytemask) << 16)|
((pe_ext->left_ftch[color] & bytemask) << 8)|
(pe_ext->left_rpt[color] & bytemask);
tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
((pe_ext->btm_rpt[color] & bytemask) << 16)|
((pe_ext->top_ftch[color] & bytemask) << 8)|
(pe_ext->top_rpt[color] & bytemask);
tot_req_pixels[color] = (((pe_ext->roi_h[color] +
pe_ext->num_ext_pxls_top[color] +
pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
((pe_ext->roi_w[color] +
pe_ext->num_ext_pxls_left[color] +
pe_ext->num_ext_pxls_right[color]) & shortmask);
}
/* color 0 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
tot_req_pixels[0]);
/* color 1 and color 2 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
tot_req_pixels[1]);
/* color 3 */
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
tot_req_pixels[3]);
}
static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *sspp,
struct dpu_hw_pixel_ext *pe,
void *scaler_cfg)
{
u32 idx;
struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
(void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
return;
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
ctx->cap->sblk->scaler_blk.version,
sspp->layout.format);
}
static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
{
u32 idx;
if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
return 0;
return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
}
/*
* dpu_hw_sspp_setup_rects() - programs source/destination rectangles
* and plane pitches
*/
static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *cfg,
enum dpu_sspp_multirect_index rect_index)
{
struct dpu_hw_blk_reg_map *c;
u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
return;
c = &ctx->hw;
if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
src_size_off = SSPP_SRC_SIZE;
src_xy_off = SSPP_SRC_XY;
out_size_off = SSPP_OUT_SIZE;
out_xy_off = SSPP_OUT_XY;
} else {
src_size_off = SSPP_SRC_SIZE_REC1;
src_xy_off = SSPP_SRC_XY_REC1;
out_size_off = SSPP_OUT_SIZE_REC1;
out_xy_off = SSPP_OUT_XY_REC1;
}
/* src and dest rect programming */
src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
src_size = (drm_rect_height(&cfg->src_rect) << 16) |
drm_rect_width(&cfg->src_rect);
dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
drm_rect_width(&cfg->dst_rect);
if (rect_index == DPU_SSPP_RECT_SOLO) {
ystride0 = (cfg->layout.plane_pitch[0]) |
(cfg->layout.plane_pitch[1] << 16);
ystride1 = (cfg->layout.plane_pitch[2]) |
(cfg->layout.plane_pitch[3] << 16);
} else {
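/*
* Multirect: YSTRIDE0/1 each pack two 16-bit pitches, one per rect,
* so read-modify-write only the halfword owned by this rect.
*/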
ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
if (rect_index == DPU_SSPP_RECT_0) {
ystride0 = (ystride0 & 0xFFFF0000) |
(cfg->layout.plane_pitch[0] & 0x0000FFFF);
ystride1 = (ystride1 & 0xFFFF0000)|
(cfg->layout.plane_pitch[2] & 0x0000FFFF);
} else {
ystride0 = (ystride0 & 0x0000FFFF) |
((cfg->layout.plane_pitch[0] << 16) &
0xFFFF0000);
ystride1 = (ystride1 & 0x0000FFFF) |
((cfg->layout.plane_pitch[2] << 16) &
0xFFFF0000);
}
}
/* rectangle register programming */
DPU_REG_WRITE(c, src_size_off + idx, src_size);
DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
DPU_REG_WRITE(c, out_size_off + idx, dst_size);
DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
}
static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *cfg,
enum dpu_sspp_multirect_index rect_mode)
{
int i;
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (rect_mode == DPU_SSPP_RECT_SOLO) {
for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
cfg->layout.plane_addr[i]);
} else if (rect_mode == DPU_SSPP_RECT_0) {
DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
cfg->layout.plane_addr[0]);
DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
cfg->layout.plane_addr[2]);
} else {
DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
cfg->layout.plane_addr[0]);
DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
cfg->layout.plane_addr[2]);
}
}
static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
struct dpu_csc_cfg *data)
{
u32 idx;
bool csc10 = false;
if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
return;
if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
idx += CSC_10BIT_OFFSET;
csc10 = true;
}
dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
}
static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
dpu_sspp_multirect_index rect_index)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
else
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
color);
}
static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
}
static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
cfg->creq_lut >> 32);
} else {
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
}
}
static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg)
{
u32 idx;
u32 qos_ctrl = 0;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (cfg->vblank_en) {
qos_ctrl |= ((cfg->creq_vblank &
SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
qos_ctrl |= ((cfg->danger_vblank &
SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
}
if (cfg->danger_safe_en)
qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cdp_cfg *cfg)
{
u32 idx;
u32 cdp_cntl = 0;
if (!ctx || !cfg)
return;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
if (cfg->enable)
cdp_cntl |= BIT(0);
if (cfg->ubwc_meta_enable)
cdp_cntl |= BIT(1);
if (cfg->tile_amortize_enable)
cdp_cntl |= BIT(2);
if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
cdp_cntl |= BIT(3);
DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
}
static void _setup_layer_ops(struct dpu_hw_pipe *c,
unsigned long features)
{
if (test_bit(DPU_SSPP_SRC, &features)) {
c->ops.setup_format = dpu_hw_sspp_setup_format;
c->ops.setup_rects = dpu_hw_sspp_setup_rects;
c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
}
if (test_bit(DPU_SSPP_QOS, &features)) {
c->ops.setup_danger_safe_lut =
dpu_hw_sspp_setup_danger_safe_lut;
c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
}
if (test_bit(DPU_SSPP_CSC, &features) ||
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
if (dpu_hw_sspp_multirect_enabled(c->cap))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
}
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
}
static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
void __iomem *addr,
struct dpu_mdss_cfg *catalog,
struct dpu_hw_blk_reg_map *b)
{
int i;
if ((sspp < SSPP_MAX) && catalog && addr && b) {
for (i = 0; i < catalog->sspp_count; i++) {
if (sspp == catalog->sspp[i].id) {
b->base_off = addr;
b->blk_off = catalog->sspp[i].base;
b->length = catalog->sspp[i].len;
b->hwversion = catalog->hwversion;
b->log_mask = DPU_DBG_MASK_SSPP;
return &catalog->sspp[i];
}
}
}
return ERR_PTR(-ENOMEM);
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe)
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
if (!hw_pipe)
return ERR_PTR(-ENOMEM);
cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(hw_pipe);
return ERR_PTR(-EINVAL);
}
/* Assign ops */
hw_pipe->catalog = catalog;
hw_pipe->mdp = &catalog->mdp[0];
hw_pipe->idx = idx;
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
return hw_pipe;
blk_init_error:
kzfree(hw_pipe);
return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
{
if (ctx)
dpu_hw_blk_destroy(&ctx->base);
kfree(ctx);
}

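Taken together, these ops imply a per-plane programming sequence along the following lines. Sketch only: the pipe handle, format and pipe config stand in for what the plane atomic update would pass down.

/* Sketch only: pipe is assumed to come from dpu_hw_sspp_init(); fmt
 * and cfg are placeholders for the plane atomic state.
 */
static void example_sspp_update(struct dpu_hw_pipe *pipe,
				const struct dpu_format *fmt,
				struct dpu_hw_pipe_cfg *cfg)
{
	/* solo rect: disable multirect if the pipe supports it */
	if (pipe->ops.setup_multirect)
		pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_SOLO,
					  DPU_SSPP_MULTIRECT_NONE);

	pipe->ops.setup_format(pipe, fmt, 0, DPU_SSPP_RECT_SOLO);
	pipe->ops.setup_rects(pipe, cfg, DPU_SSPP_RECT_SOLO);
	pipe->ops.setup_sourceaddress(pipe, cfg, DPU_SSPP_RECT_SOLO);
}
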
@@ -0,0 +1,424 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_SSPP_H
#define _DPU_HW_SSPP_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
#include "dpu_formats.h"
struct dpu_hw_pipe;
/**
* Flags
*/
#define DPU_SSPP_FLIP_LR BIT(0)
#define DPU_SSPP_FLIP_UD BIT(1)
#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
#define DPU_SSPP_ROT_90 BIT(3)
#define DPU_SSPP_SOLID_FILL BIT(4)
/**
* Define all scaler feature bits in catalog
*/
#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
(1UL << DPU_SSPP_SCALER_QSEED2) | \
(1UL << DPU_SSPP_SCALER_QSEED3))
/**
* Component indices
*/
enum {
DPU_SSPP_COMP_0,
DPU_SSPP_COMP_1_2,
DPU_SSPP_COMP_2,
DPU_SSPP_COMP_3,
DPU_SSPP_COMP_MAX
};
/**
* DPU_SSPP_RECT_SOLO - multirect disabled
* DPU_SSPP_RECT_0 - rect0 of a multirect pipe
* DPU_SSPP_RECT_1 - rect1 of a multirect pipe
*
* Note: HW supports multirect with either RECT0 or
* RECT1. Considering no benefit of such configs over
* SOLO mode and to keep the plane management simple,
* we don't support single rect multirect configs.
*/
enum dpu_sspp_multirect_index {
DPU_SSPP_RECT_SOLO = 0,
DPU_SSPP_RECT_0,
DPU_SSPP_RECT_1,
};
enum dpu_sspp_multirect_mode {
DPU_SSPP_MULTIRECT_NONE = 0,
DPU_SSPP_MULTIRECT_PARALLEL,
DPU_SSPP_MULTIRECT_TIME_MX,
};
enum {
DPU_FRAME_LINEAR,
DPU_FRAME_TILE_A4X,
DPU_FRAME_TILE_A5X,
};
enum dpu_hw_filter {
DPU_SCALE_FILTER_NEAREST = 0,
DPU_SCALE_FILTER_BIL,
DPU_SCALE_FILTER_PCMN,
DPU_SCALE_FILTER_CA,
DPU_SCALE_FILTER_MAX
};
enum dpu_hw_filter_alpa {
DPU_SCALE_ALPHA_PIXEL_REP,
DPU_SCALE_ALPHA_BIL
};
enum dpu_hw_filter_yuv {
DPU_SCALE_2D_4X4,
DPU_SCALE_2D_CIR,
DPU_SCALE_1D_SEP,
DPU_SCALE_BIL
};
struct dpu_hw_sharp_cfg {
u32 strength;
u32 edge_thr;
u32 smooth_thr;
u32 noise_thr;
};
struct dpu_hw_pixel_ext {
/* scaling factors are enabled for this input layer */
uint8_t enable_pxl_ext;
int init_phase_x[DPU_MAX_PLANES];
int phase_step_x[DPU_MAX_PLANES];
int init_phase_y[DPU_MAX_PLANES];
int phase_step_y[DPU_MAX_PLANES];
/*
* Number of pixels extension in left, right, top and bottom direction
* for all color components. This pixel value for each color component
* should be sum of fetch + repeat pixels.
*/
int num_ext_pxls_left[DPU_MAX_PLANES];
int num_ext_pxls_right[DPU_MAX_PLANES];
int num_ext_pxls_top[DPU_MAX_PLANES];
int num_ext_pxls_btm[DPU_MAX_PLANES];
/*
* Number of pixels needs to be overfetched in left, right, top and
* bottom directions from source image for scaling.
*/
int left_ftch[DPU_MAX_PLANES];
int right_ftch[DPU_MAX_PLANES];
int top_ftch[DPU_MAX_PLANES];
int btm_ftch[DPU_MAX_PLANES];
/*
* Number of pixels needs to be repeated in left, right, top and
* bottom directions for scaling.
*/
int left_rpt[DPU_MAX_PLANES];
int right_rpt[DPU_MAX_PLANES];
int top_rpt[DPU_MAX_PLANES];
int btm_rpt[DPU_MAX_PLANES];
uint32_t roi_w[DPU_MAX_PLANES];
uint32_t roi_h[DPU_MAX_PLANES];
/*
* Filter type to be used for scaling in horizontal and vertical
* directions
*/
enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
};
/**
* struct dpu_hw_pipe_cfg : Pipe description
* @layout: format layout information for programming buffer to hardware
* @src_rect: src ROI, caller takes into account the different operations
* such as decimation, flip etc to program this field
* @dst_rect: destination ROI.
* @index: index of the rectangle of SSPP
* @mode: parallel or time multiplex multirect mode
*/
struct dpu_hw_pipe_cfg {
struct dpu_hw_fmt_layout layout;
struct drm_rect src_rect;
struct drm_rect dst_rect;
enum dpu_sspp_multirect_index index;
enum dpu_sspp_multirect_mode mode;
};
/**
* struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
* @danger_lut: LUT for generating danger level based on fill level
* @safe_lut: LUT for generating safe level based on fill level
* @creq_lut: LUT for generating creq level based on fill level
* @creq_vblank: creq value generated to vbif during vertical blanking
* @danger_vblank: danger value generated during vertical blanking
* @vblank_en: enable creq_vblank and danger_vblank during vblank
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_pipe_qos_cfg {
u32 danger_lut;
u32 safe_lut;
u64 creq_lut;
u32 creq_vblank;
u32 danger_vblank;
bool vblank_en;
bool danger_safe_en;
};
/**
* CDP preload ahead address sizes
*/
enum {
DPU_SSPP_CDP_PRELOAD_AHEAD_32,
DPU_SSPP_CDP_PRELOAD_AHEAD_64
};
/**
* struct dpu_hw_pipe_cdp_cfg : CDP configuration
* @enable: true to enable CDP
* @ubwc_meta_enable: true to enable ubwc metadata preload
* @tile_amortize_enable: true to enable amortization control for tile format
* @preload_ahead: number of requests to preload ahead
* DPU_SSPP_CDP_PRELOAD_AHEAD_32,
* DPU_SSPP_CDP_PRELOAD_AHEAD_64
*/
struct dpu_hw_pipe_cdp_cfg {
bool enable;
bool ubwc_meta_enable;
bool tile_amortize_enable;
u32 preload_ahead;
};
/**
* struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
* @size: size to prefill in bytes, or zero to disable
* @time: time to prefill in usec, or zero to disable
*/
struct dpu_hw_pipe_ts_cfg {
u64 size;
u64 time;
};
/**
* struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
* Caller must call the init function to get the pipe context for each pipe.
* Assumption is these functions will be called after clocks are enabled.
*/
struct dpu_hw_sspp_ops {
/**
* setup_format - setup pixel format, cropping rectangle and flip
* @ctx: Pointer to pipe context
* @fmt: Pointer to the pixel format description
* @flags: Extra flags for format config
* @index: rectangle index in multirect
*/
void (*setup_format)(struct dpu_hw_pipe *ctx,
const struct dpu_format *fmt, u32 flags,
enum dpu_sspp_multirect_index index);
/**
* setup_rects - setup pipe ROI rectangles
* @ctx: Pointer to pipe context
* @cfg: Pointer to pipe config structure
* @index: rectangle index in multirect
*/
void (*setup_rects)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *cfg,
enum dpu_sspp_multirect_index index);
/**
* setup_pe - setup pipe pixel extension
* @ctx: Pointer to pipe context
* @pe_ext: Pointer to pixel ext settings
*/
void (*setup_pe)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pixel_ext *pe_ext);
/**
* setup_sourceaddress - setup pipe source addresses
* @ctx: Pointer to pipe context
* @cfg: Pointer to pipe config structure
* @index: rectangle index in multirect
*/
void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *cfg,
enum dpu_sspp_multirect_index index);
/**
* setup_csc - setup color space conversion
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
/**
* setup_solidfill - enable/disable colorfill
* @ctx: Pointer to pipe context
* @color: Fill color value
* @index: rectangle index in multirect
*/
void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
enum dpu_sspp_multirect_index index);
/**
* setup_multirect - setup multirect configuration
* @ctx: Pointer to pipe context
* @index: rectangle index in multirect
* @mode: parallel fetch / time multiplex multirect mode
*/
void (*setup_multirect)(struct dpu_hw_pipe *ctx,
enum dpu_sspp_multirect_index index,
enum dpu_sspp_multirect_mode mode);
/**
* setup_sharpening - setup sharpening
* @ctx: Pointer to pipe context
* @cfg: Pointer to config structure
*/
void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
struct dpu_hw_sharp_cfg *cfg);
/**
* setup_danger_safe_lut - setup danger safe LUTs
* @ctx: Pointer to pipe context
* @cfg: Pointer to pipe QoS configuration
*
*/
void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg);
/**
* setup_creq_lut - setup CREQ LUT
* @ctx: Pointer to pipe context
* @cfg: Pointer to pipe QoS configuration
*
*/
void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg);
/**
* setup_qos_ctrl - setup QoS control
* @ctx: Pointer to pipe context
* @cfg: Pointer to pipe QoS configuration
*
*/
void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_qos_cfg *cfg);
/**
* setup_histogram - setup histograms
* @ctx: Pointer to pipe context
* @cfg: Pointer to histogram configuration
*/
void (*setup_histogram)(struct dpu_hw_pipe *ctx,
void *cfg);
/**
* setup_scaler - setup scaler
* @ctx: Pointer to pipe context
* @pipe_cfg: Pointer to pipe configuration
* @pe_cfg: Pointer to pixel extension configuration
* @scaler_cfg: Pointer to scaler configuration
*/
void (*setup_scaler)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct dpu_hw_pixel_ext *pe_cfg,
void *scaler_cfg);
/**
* get_scaler_ver - get scaler h/w version
* @ctx: Pointer to pipe context
*/
u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
/**
* setup_cdp - setup client driven prefetch
* @ctx: Pointer to pipe context
* @cfg: Pointer to cdp configuration
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
struct dpu_hw_pipe_cdp_cfg *cfg);
};
/**
* struct dpu_hw_pipe - pipe description
* @base: hardware block base structure
* @hw: block hardware details
* @catalog: back pointer to catalog
* @mdp: pointer to associated mdp portion of the catalog
* @idx: pipe index
* @cap: pointer to the SSPP catalog configuration
* @ops: pointer to operations possible for this pipe
*/
struct dpu_hw_pipe {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
struct dpu_mdss_cfg *catalog;
struct dpu_mdp_cfg *mdp;
/* Pipe */
enum dpu_sspp idx;
const struct dpu_sspp_cfg *cap;
/* Ops */
struct dpu_hw_sspp_ops ops;
};
/**
* to_dpu_hw_pipe - convert base object dpu_hw_blk to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_pipe, base);
}
/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once per pipe, before the pipe is accessed.
* @idx: Pipe index for which driver object is required
* @addr: Mapped register io address of MDP
* @catalog : Pointer to mdss catalog data
* @is_virtual_pipe: true if this pipe is virtual
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
bool is_virtual_pipe);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context;
* should be called during HW pipe cleanup.
* @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
*/
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
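/*
 * Illustrative usage sketch (mdp_io_base, catalog and fmt are
 * placeholders for the caller's mapped MDP base, catalog data and
 * format description):
 *
 *	struct dpu_hw_pipe *pipe;
 *
 *	pipe = dpu_hw_sspp_init(SSPP_VIG0, mdp_io_base, catalog, false);
 *	if (!IS_ERR(pipe) && pipe->ops.setup_format)
 *		pipe->ops.setup_format(pipe, fmt, 0, DPU_SSPP_RECT_SOLO);
 *	...
 *	dpu_hw_sspp_destroy(pipe);
 *
 * Ops entries are populated from catalog features, so each function
 * pointer should be checked before use.
 */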
#endif /*_DPU_HW_SSPP_H */

@@ -0,0 +1,398 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
#define UBWC_STATIC 0x144
#define FLD_SPLIT_DISPLAY_CMD BIT(1)
#define FLD_SMART_PANEL_FREE_RUN BIT(2)
#define FLD_INTF_1_SW_TRG_MUX BIT(4)
#define FLD_INTF_2_SW_TRG_MUX BIT(8)
#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
#define DANGER_STATUS 0x360
#define SAFE_STATUS 0x364
#define TE_LINE_INTERVAL 0x3F4
#define TRAFFIC_SHAPER_EN BIT(31)
#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
#define MDP_WD_TIMER_0_CTL 0x380
#define MDP_WD_TIMER_0_CTL2 0x384
#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
#define MDP_WD_TIMER_1_CTL 0x390
#define MDP_WD_TIMER_1_CTL2 0x394
#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
#define MDP_WD_TIMER_2_CTL 0x420
#define MDP_WD_TIMER_2_CTL2 0x424
#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
#define MDP_WD_TIMER_3_CTL 0x430
#define MDP_WD_TIMER_3_CTL2 0x434
#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
#define MDP_WD_TIMER_4_CTL 0x440
#define MDP_WD_TIMER_4_CTL2 0x444
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define MDP_TICK_COUNT 16
#define XO_CLK_RATE 19200
#define MS_TICKS_IN_SEC 1000
#define CALCULATE_WD_LOAD_VALUE(fps) \
((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
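/*
 * E.g. for a 60 fps display: (1000 * 19200) / (16 * 60) = 20000 ticks
 * per frame of the 19.2 MHz XO clock divided down by MDP_TICK_COUNT.
 */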
#define DCE_SEL 0x450
static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
struct split_pipe_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 upper_pipe = 0;
u32 lower_pipe = 0;
if (!mdp || !cfg)
return;
c = &mdp->hw;
if (cfg->en) {
if (cfg->mode == INTF_MODE_CMD) {
lower_pipe = FLD_SPLIT_DISPLAY_CMD;
/* interface controlling sw trigger */
if (cfg->intf == INTF_2)
lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
else
lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
upper_pipe = lower_pipe;
} else {
if (cfg->intf == INTF_2) {
lower_pipe = FLD_INTF_1_SW_TRG_MUX;
upper_pipe = FLD_INTF_2_SW_TRG_MUX;
} else {
lower_pipe = FLD_INTF_2_SW_TRG_MUX;
upper_pipe = FLD_INTF_1_SW_TRG_MUX;
}
}
}
DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
struct cdm_output_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 out_ctl = 0;
if (!mdp || !cfg)
return;
c = &mdp->hw;
if (cfg->intf_en)
out_ctl |= BIT(19);
DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
}
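/*
 * Force a clock on (or release the force) and report whether this call
 * actually forced it on, i.e. the bit was previously clear; callers use
 * the return value to restore the prior state afterwards.
 */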
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_off, bit_off;
u32 reg_val, new_val;
bool clk_forced_on;
if (!mdp)
return false;
c = &mdp->hw;
if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
return false;
reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
reg_val = DPU_REG_READ(c, reg_off);
if (enable)
new_val = reg_val | BIT(bit_off);
else
new_val = reg_val & ~BIT(bit_off);
DPU_REG_WRITE(c, reg_off, new_val);
clk_forced_on = !(reg_val & BIT(bit_off));
return clk_forced_on;
}
static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status)
{
struct dpu_hw_blk_reg_map *c;
u32 value;
if (!mdp || !status)
return;
c = &mdp->hw;
value = DPU_REG_READ(c, DANGER_STATUS);
status->mdp = (value >> 0) & 0x3;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
}
static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
return;
c = &mdp->hw;
reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
for (i = 0; i < cfg->pp_count; i++) {
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
if (pp_idx >= ARRAY_SIZE(pp_offset))
continue;
reg &= ~(0xf << pp_offset[pp_idx]);
reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
}
DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
switch (cfg->vsync_source) {
case DPU_VSYNC_SOURCE_WD_TIMER_4:
wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_4_CTL;
wd_ctl2 = MDP_WD_TIMER_4_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_3:
wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_3_CTL;
wd_ctl2 = MDP_WD_TIMER_3_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_2:
wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_2_CTL;
wd_ctl2 = MDP_WD_TIMER_2_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_1:
wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_1_CTL;
wd_ctl2 = MDP_WD_TIMER_1_CTL2;
break;
case DPU_VSYNC_SOURCE_WD_TIMER_0:
default:
wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
wd_ctl = MDP_WD_TIMER_0_CTL;
wd_ctl2 = MDP_WD_TIMER_0_CTL2;
break;
}
DPU_REG_WRITE(c, wd_load_value,
CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
reg = DPU_REG_READ(c, wd_ctl2);
reg |= BIT(8); /* enable heartbeat timer */
reg |= BIT(0); /* enable WD timer */
DPU_REG_WRITE(c, wd_ctl2, reg);
/* make sure that timers are enabled/disabled for vsync state */
wmb();
}
}
static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status)
{
struct dpu_hw_blk_reg_map *c;
u32 value;
if (!mdp || !status)
return;
c = &mdp->hw;
value = DPU_REG_READ(c, SAFE_STATUS);
status->mdp = (value >> 0) & 0x1;
status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
}
void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
{
struct dpu_hw_blk_reg_map c;
if (!mdp || !m)
return;
if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
return;
/* force blk offset to zero to access beginning of register region */
c = mdp->hw;
c.blk_off = 0x0;
DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
}
static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
{
struct dpu_hw_blk_reg_map *c;
if (!mdp)
return;
c = &mdp->hw;
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_cdm_output = dpu_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
ops->get_safe_status = dpu_hw_get_safe_status;
ops->reset_ubwc = dpu_hw_reset_ubwc;
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
if (!m || !addr || !b)
return ERR_PTR(-EINVAL);
for (i = 0; i < m->mdp_count; i++) {
if (mdp == m->mdp[i].id) {
b->base_off = addr;
b->blk_off = m->mdp[i].base;
b->length = m->mdp[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_TOP;
return &m->mdp[i];
}
}
return ERR_PTR(-EINVAL);
}
static struct dpu_hw_blk_ops dpu_hw_ops = {
.start = NULL,
.stop = NULL,
};
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
cfg = _top_offset(idx, m, addr, &mdp->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(mdp);
return ERR_PTR(-EINVAL);
}
/*
* Assign ops
*/
mdp->idx = idx;
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
if (rc) {
DPU_ERROR("failed to init hw blk %d\n", rc);
goto blk_init_error;
}
dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
return mdp;
blk_init_error:
kzfree(mdp);
return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
{
if (mdp)
dpu_hw_blk_destroy(&mdp->base);
kfree(mdp);
}

@@ -0,0 +1,202 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_TOP_H
#define _DPU_HW_TOP_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"
struct dpu_hw_mdp;
/**
* struct traffic_shaper_cfg: traffic shaper configuration
* @en : enable/disable traffic shaper
* @rd_client : true if read client; false if write client
* @client_id : client identifier
* @bpc_denom : denominator of bytes per clock
* @bpc_numer : numerator of bytes per clock
*/
struct traffic_shaper_cfg {
bool en;
bool rd_client;
u32 client_id;
u32 bpc_denom;
u64 bpc_numer;
};
/**
* struct split_pipe_cfg - pipe configuration for dual display panels
* @en : Enable/disable dual pipe configuration
* @mode : Panel interface mode
* @intf : Interface id for main control path
* @split_flush_en: Allows both the paths to be flushed when master path is
* flushed
*/
struct split_pipe_cfg {
bool en;
enum dpu_intf_mode mode;
enum dpu_intf intf;
bool split_flush_en;
};
/**
* struct cdm_output_cfg: output configuration for cdm
* @intf_en : enable/disable interface output
*/
struct cdm_output_cfg {
bool intf_en;
};
/**
* struct dpu_danger_safe_status: danger and safe status signals
* @mdp: top level status
* @sspp: source pipe status
*/
struct dpu_danger_safe_status {
u8 mdp;
u8 sspp[SSPP_MAX];
};
/**
* struct dpu_vsync_source_cfg - configure the vsync source and, if
* required, the watchdog timers.
* @pp_count: number of ping pongs active
* @frame_rate: Display frame rate
* @ppnumber: ping pong index array
* @vsync_source: vsync source selection
*/
struct dpu_vsync_source_cfg {
u32 pp_count;
u32 frame_rate;
u32 ppnumber[PINGPONG_MAX];
u32 vsync_source;
};
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
* @setup_cdm_output : programs cdm control
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
/** setup_split_pipe() : Registers are not double buffered, this
* function should be called before timing control enable
* @mdp : mdp top context driver
* @cfg : upper and lower part of pipe configuration
*/
void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
struct split_pipe_cfg *p);
/**
* setup_cdm_output() : Setup selection control of the cdm data path
* @mdp : mdp top context driver
* @cfg : cdm output configuration
*/
void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
struct cdm_output_cfg *cfg);
/**
* setup_traffic_shaper() : Setup traffic shaper control
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
*/
void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
struct traffic_shaper_cfg *cfg);
/**
* setup_clk_force_ctrl - set clock force control
* @mdp: mdp top context driver
* @clk_ctrl: clock to be controlled
* @enable: force on enable
* @return: true if the clock was forced on by this call
*/
bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable);
/**
* get_danger_status - get danger status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
void (*get_danger_status)(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status);
/**
* setup_vsync_source - setup vsync source configuration details
* @mdp: mdp top context driver
* @cfg: vsync source selection configuration
*/
void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg);
/**
* get_safe_status - get safe status
* @mdp: mdp top context driver
* @status: Pointer to danger safe status
*/
void (*get_safe_status)(struct dpu_hw_mdp *mdp,
struct dpu_danger_safe_status *status);
/**
* reset_ubwc - reset top level UBWC configuration
* @mdp: mdp top context driver
* @m: pointer to mdss catalog data
*/
void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
/**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
*/
void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
};
struct dpu_hw_mdp {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
/* top */
enum dpu_mdp idx;
const struct dpu_mdp_cfg *caps;
/* ops */
struct dpu_hw_mdp_ops ops;
};
/**
* to_dpu_hw_mdp - convert base object dpu_hw_blk to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_mdp, base);
}
/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
*/
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
#endif /*_DPU_HW_TOP_H */

@@ -0,0 +1,452 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
/* file static variable for debugfs access */
static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
/* DPU_SCALER_QSEED3 */
#define QSEED3_HW_VERSION 0x00
#define QSEED3_OP_MODE 0x04
#define QSEED3_RGB2Y_COEFF 0x08
#define QSEED3_PHASE_INIT 0x0C
#define QSEED3_PHASE_STEP_Y_H 0x10
#define QSEED3_PHASE_STEP_Y_V 0x14
#define QSEED3_PHASE_STEP_UV_H 0x18
#define QSEED3_PHASE_STEP_UV_V 0x1C
#define QSEED3_PRELOAD 0x20
#define QSEED3_DE_SHARPEN 0x24
#define QSEED3_DE_SHARPEN_CTL 0x28
#define QSEED3_DE_SHAPE_CTL 0x2C
#define QSEED3_DE_THRESHOLD 0x30
#define QSEED3_DE_ADJUST_DATA_0 0x34
#define QSEED3_DE_ADJUST_DATA_1 0x38
#define QSEED3_DE_ADJUST_DATA_2 0x3C
#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
#define QSEED3_SRC_SIZE_UV 0x44
#define QSEED3_DST_SIZE 0x48
#define QSEED3_COEF_LUT_CTRL 0x4C
#define QSEED3_COEF_LUT_SWAP_BIT 0
#define QSEED3_COEF_LUT_DIR_BIT 1
#define QSEED3_COEF_LUT_Y_CIR_BIT 2
#define QSEED3_COEF_LUT_UV_CIR_BIT 3
#define QSEED3_COEF_LUT_Y_SEP_BIT 4
#define QSEED3_COEF_LUT_UV_SEP_BIT 5
#define QSEED3_BUFFER_CTRL 0x50
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
#define QSEED3_MISR_CTRL 0x70
#define QSEED3_MISR_SIGNATURE_0 0x74
#define QSEED3_MISR_SIGNATURE_1 0x78
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98
#define QSEED3_PHASE_INIT_UV_V 0x9C
#define QSEED3_COEF_LUT 0x100
#define QSEED3_FILTERS 5
#define QSEED3_LUT_REGIONS 4
#define QSEED3_CIRCULAR_LUTS 9
#define QSEED3_SEPARABLE_LUTS 10
#define QSEED3_LUT_SIZE 60
#define QSEED3_ENABLE 2
#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
#define QSEED3_CIR_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
#define QSEED3_SEP_LUT_SIZE \
(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
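/*
 * LUT sizes are in bytes: e.g. the circular LUT region holds
 * QSEED3_LUT_SIZE (60) u32 coefficients for each of the 9 circular
 * filter banks, i.e. 60 * 9 * 4 = 2160 bytes in total.
 */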
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
const char *name)
{
/* don't need to mutex protect this */
if (c->log_mask & dpu_hw_util_log_mask)
DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
name, c->blk_off + reg_off, val);
writel_relaxed(val, c->base_off + c->blk_off + reg_off);
}
int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
{
return readl_relaxed(c->base_off + c->blk_off + reg_off);
}
u32 *dpu_hw_util_get_log_mask_ptr(void)
{
return &dpu_hw_util_log_mask;
}
void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
const struct dpu_drm_scaler_v2 *scale_v2)
{
int i;
cfg->enable = scale_v2->enable;
cfg->dir_en = scale_v2->dir_en;
for (i = 0; i < DPU_MAX_PLANES; i++) {
cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
cfg->preload_x[i] = scale_v2->preload_x[i];
cfg->preload_y[i] = scale_v2->preload_y[i];
cfg->src_width[i] = scale_v2->src_width[i];
cfg->src_height[i] = scale_v2->src_height[i];
}
cfg->dst_width = scale_v2->dst_width;
cfg->dst_height = scale_v2->dst_height;
cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
cfg->blend_cfg = scale_v2->blend_cfg;
cfg->lut_flag = scale_v2->lut_flag;
cfg->dir_lut_idx = scale_v2->dir_lut_idx;
cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
cfg->de.enable = scale_v2->de.enable;
cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
cfg->de.clip = scale_v2->de.clip;
cfg->de.limit = scale_v2->de.limit;
cfg->de.thr_quiet = scale_v2->de.thr_quiet;
cfg->de.thr_dieout = scale_v2->de.thr_dieout;
cfg->de.thr_low = scale_v2->de.thr_low;
cfg->de.thr_high = scale_v2->de.thr_high;
cfg->de.prec_shift = scale_v2->de.prec_shift;
for (i = 0; i < DPU_MAX_DE_CURVES; i++) {
cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
}
}
static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
{
int i, j, filter;
int config_lut = 0x0;
unsigned long lut_flags;
u32 lut_addr, lut_offset, lut_len;
u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
};
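/*
 * Each off_tbl entry is {length, offset}: a region holds
 * length * 4 u32 coefficients starting at QSEED3_COEF_LUT + offset;
 * lut_len below expands the entry count via length << 2.
 */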
lut_flags = (unsigned long) scaler3_cfg->lut_flag;
if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
lut[0] = scaler3_cfg->dir_lut;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
lut[1] = scaler3_cfg->cir_lut +
scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
lut[2] = scaler3_cfg->cir_lut +
scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
lut[3] = scaler3_cfg->sep_lut +
scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
lut[4] = scaler3_cfg->sep_lut +
scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
config_lut = 1;
}
if (config_lut) {
for (filter = 0; filter < QSEED3_FILTERS; filter++) {
if (!lut[filter])
continue;
lut_offset = 0;
for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
lut_addr = QSEED3_COEF_LUT + offset
+ off_tbl[filter][i][1];
lut_len = off_tbl[filter][i][0] << 2;
for (j = 0; j < lut_len; j++) {
DPU_REG_WRITE(c,
lut_addr,
(lut[filter])[lut_offset++]);
lut_addr += 4;
}
}
}
}
if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
}
static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
{
u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
u32 adjust_a, adjust_b, adjust_c;
if (!de_cfg->enable)
return;
sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
((de_cfg->sharpen_level2 & 0x1FF) << 16);
sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
((de_cfg->prec_shift & 0x7) << 13) |
((de_cfg->clip & 0x7) << 16);
shape_ctl = (de_cfg->thr_quiet & 0xFF) |
((de_cfg->thr_dieout & 0x3FF) << 16);
de_thr = (de_cfg->thr_low & 0x3FF) |
((de_cfg->thr_high & 0x3FF) << 16);
adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
((de_cfg->adjust_a[1] & 0x3FF) << 10) |
((de_cfg->adjust_a[2] & 0x3FF) << 20);
adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
((de_cfg->adjust_b[1] & 0x3FF) << 10) |
((de_cfg->adjust_b[2] & 0x3FF) << 20);
adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
((de_cfg->adjust_c[1] & 0x3FF) << 10) |
((de_cfg->adjust_c[2] & 0x3FF) << 20);
DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
}
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format)
{
u32 op_mode = 0;
u32 phase_init, preload, src_y_rgb, src_uv, dst;
if (!scaler3_cfg->enable)
goto end;
op_mode |= BIT(0);
op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
if (format && DPU_FORMAT_IS_YUV(format)) {
op_mode |= BIT(12);
op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
}
op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
preload =
((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
((scaler3_cfg->preload_y[1] & 0x7F) << 24);
src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
dst = (scaler3_cfg->dst_width & 0x1FFFF) |
((scaler3_cfg->dst_height & 0x1FFFF) << 16);
if (scaler3_cfg->de.enable) {
_dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
op_mode |= BIT(8);
}
if (scaler3_cfg->lut_flag)
_dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
scaler_offset);
if (scaler_version == 0x1002) {
phase_init =
((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
} else {
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
}
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
end:
if (format && !DPU_FORMAT_IS_DX(format))
op_mode |= BIT(14);
if (format && format->alpha_enable) {
op_mode |= BIT(10);
if (scaler_version == 0x1002)
op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
else
op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
}
DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
}
u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
u32 scaler_offset)
{
return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
}
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
struct dpu_csc_cfg *data, bool csc10)
{
static const u32 matrix_shift = 7;
u32 clamp_shift = csc10 ? 16 : 8;
u32 val;
/* matrix coeff - convert S15.16 to S4.9 */
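/*
 * Shifting right by matrix_shift (7) reduces the 16-bit fraction to
 * 9 bits; the 0x1FFF mask keeps a 13-bit two's complement (S4.9)
 * value, with two coefficients packed per 32-bit register.
 */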
val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off, val);
val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
/* Pre clamp */
val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
/* Post clamp */
val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
/* Pre-Bias */
DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
/* Post-Bias */
DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
}
/**
* dpu_copy_formats - copy formats from src_list to dst_list
* @dst_list: pointer to destination list to copy formats to
* @dst_list_size: size of destination list
* @dst_list_pos: position in the destination list at which to start copying
* @src_list: pointer to source list to copy formats from
* @src_list_size: size of source list
* Return: number of elements populated
*/
uint32_t dpu_copy_formats(
struct dpu_format_extended *dst_list,
uint32_t dst_list_size,
uint32_t dst_list_pos,
const struct dpu_format_extended *src_list,
uint32_t src_list_size)
{
uint32_t cur_pos, i;
if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
return 0;
for (i = 0, cur_pos = dst_list_pos;
(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
&& src_list[i].fourcc_format; ++i, ++cur_pos)
dst_list[cur_pos] = src_list[i];
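/* terminate the destination list with a zero fourcc sentinel */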
dst_list[cur_pos].fourcc_format = 0;
return i;
}

@@ -0,0 +1,358 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_UTIL_H
#define _DPU_HW_UTIL_H
#include <linux/io.h>
#include <linux/slab.h>
#include "dpu_hw_mdss.h"
#define REG_MASK(n) ((BIT(n)) - 1)
struct dpu_format_extended;
/*
* This is the common struct maintained by each sub block
* for mapping the register offsets in this block to the
* absolute IO address
* @base_off: mdp register mapped offset
* @blk_off: pipe offset relative to mdss offset
* @length: length of the register block
* @xin_id: xin id
* @hwversion: mdss hw version number
* @log_mask: log mask for this block
*/
struct dpu_hw_blk_reg_map {
void __iomem *base_off;
u32 blk_off;
u32 length;
u32 xin_id;
u32 hwversion;
u32 log_mask;
};
/**
* struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
* @enable: detail enhancer enable/disable
* @sharpen_level1: sharpening strength for noise
* @sharpen_level2: sharpening strength for signal
* @clip: clip shift
* @limit: limit value
* @thr_quiet: quiet threshold
* @thr_dieout: dieout threshold
* @thr_low: low threshold
* @thr_high: high threshold
* @prec_shift: precision shift
* @adjust_a: A-coefficients for mapping curve
* @adjust_b: B-coefficients for mapping curve
* @adjust_c: C-coefficients for mapping curve
*/
struct dpu_hw_scaler3_de_cfg {
u32 enable;
int16_t sharpen_level1;
int16_t sharpen_level2;
uint16_t clip;
uint16_t limit;
uint16_t thr_quiet;
uint16_t thr_dieout;
uint16_t thr_low;
uint16_t thr_high;
uint16_t prec_shift;
int16_t adjust_a[DPU_MAX_DE_CURVES];
int16_t adjust_b[DPU_MAX_DE_CURVES];
int16_t adjust_c[DPU_MAX_DE_CURVES];
};
/**
* struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
* @enable: scaler enable
* @dir_en: direction detection block enable
* @init_phase_x: horizontal initial phase
* @phase_step_x: horizontal phase step
* @init_phase_y: vertical initial phase
* @phase_step_y: vertical phase step
* @preload_x: horizontal preload value
* @preload_y: vertical preload value
* @src_width: source width
* @src_height: source height
* @dst_width: destination width
* @dst_height: destination height
* @y_rgb_filter_cfg: y/rgb plane filter configuration
* @uv_filter_cfg: uv plane filter configuration
* @alpha_filter_cfg: alpha filter configuration
* @blend_cfg: blend coefficients configuration
* @lut_flag: scaler LUT update flags
* 0x1 swap LUT bank
* 0x2 update 2D filter LUT
* 0x4 update y circular filter LUT
* 0x8 update uv circular filter LUT
* 0x10 update y separable filter LUT
* 0x20 update uv separable filter LUT
* @dir_lut_idx: 2D filter LUT index
* @y_rgb_cir_lut_idx: y circular filter LUT index
* @uv_cir_lut_idx: uv circular filter LUT index
* @y_rgb_sep_lut_idx: y separable filter LUT index
* @uv_sep_lut_idx: uv separable filter LUT index
* @dir_lut: pointer to 2D LUT
* @dir_len: length of @dir_lut in bytes
* @cir_lut: pointer to circular filter LUT
* @cir_len: length of @cir_lut in bytes
* @sep_lut: pointer to separable filter LUT
* @sep_len: length of @sep_lut in bytes
* @de: detail enhancer configuration
*/
struct dpu_hw_scaler3_cfg {
u32 enable;
u32 dir_en;
int32_t init_phase_x[DPU_MAX_PLANES];
int32_t phase_step_x[DPU_MAX_PLANES];
int32_t init_phase_y[DPU_MAX_PLANES];
int32_t phase_step_y[DPU_MAX_PLANES];
u32 preload_x[DPU_MAX_PLANES];
u32 preload_y[DPU_MAX_PLANES];
u32 src_width[DPU_MAX_PLANES];
u32 src_height[DPU_MAX_PLANES];
u32 dst_width;
u32 dst_height;
u32 y_rgb_filter_cfg;
u32 uv_filter_cfg;
u32 alpha_filter_cfg;
u32 blend_cfg;
u32 lut_flag;
u32 dir_lut_idx;
u32 y_rgb_cir_lut_idx;
u32 uv_cir_lut_idx;
u32 y_rgb_sep_lut_idx;
u32 uv_sep_lut_idx;
u32 *dir_lut;
size_t dir_len;
u32 *cir_lut;
size_t cir_len;
u32 *sep_lut;
size_t sep_len;
/*
* Detail enhancer settings
*/
struct dpu_hw_scaler3_de_cfg de;
};
struct dpu_hw_scaler3_lut_cfg {
bool is_configured;
u32 *dir_lut;
size_t dir_len;
u32 *cir_lut;
size_t cir_len;
u32 *sep_lut;
size_t sep_len;
};
/**
* struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
* @num_ext_pxls_tb: Number of total vertical lines
* @left_ftch: Number of extra pixels to overfetch from left
* @right_ftch: Number of extra pixels to overfetch from right
* @top_ftch: Number of extra lines to overfetch from top
* @btm_ftch: Number of extra lines to overfetch from bottom
* @left_rpt: Number of extra pixels to repeat from left
* @right_rpt: Number of extra pixels to repeat from right
* @top_rpt: Number of extra lines to repeat from top
* @btm_rpt: Number of extra lines to repeat from bottom
*/
struct dpu_drm_pix_ext_v1 {
/*
* Number of pixels ext in left, right, top and bottom direction
* for all color components.
*/
int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
/*
* Number of pixels that need to be overfetched from the source
* image in the left, right, top and bottom directions for scaling.
*/
int32_t left_ftch[DPU_MAX_PLANES];
int32_t right_ftch[DPU_MAX_PLANES];
int32_t top_ftch[DPU_MAX_PLANES];
int32_t btm_ftch[DPU_MAX_PLANES];
/*
* Number of pixels that need to be repeated in the left, right,
* top and bottom directions for scaling.
*/
int32_t left_rpt[DPU_MAX_PLANES];
int32_t right_rpt[DPU_MAX_PLANES];
int32_t top_rpt[DPU_MAX_PLANES];
int32_t btm_rpt[DPU_MAX_PLANES];
};
/**
* struct dpu_drm_de_v1 - version 1 of detail enhancer structure
* @enable: Enables/disables detail enhancer
* @sharpen_level1: Sharpening strength for noise
* @sharpen_level2: Sharpening strength for context
* @clip: Clip coefficient
* @limit: Detail enhancer limit factor
* @thr_quiet: Quiet zone threshold
* @thr_dieout: Die-out zone threshold
* @thr_low: Linear zone left threshold
* @thr_high: Linear zone right threshold
* @prec_shift: Detail enhancer precision
* @adjust_a: Mapping curves A coefficients
* @adjust_b: Mapping curves B coefficients
* @adjust_c: Mapping curves C coefficients
*/
struct dpu_drm_de_v1 {
uint32_t enable;
int16_t sharpen_level1;
int16_t sharpen_level2;
uint16_t clip;
uint16_t limit;
uint16_t thr_quiet;
uint16_t thr_dieout;
uint16_t thr_low;
uint16_t thr_high;
uint16_t prec_shift;
int16_t adjust_a[DPU_MAX_DE_CURVES];
int16_t adjust_b[DPU_MAX_DE_CURVES];
int16_t adjust_c[DPU_MAX_DE_CURVES];
};
/**
* struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
* @enable: Scaler enable
* @dir_en: Direction detection block enable
* @pe: Pixel extension settings
* @horz_decimate: Horizontal decimation factor
* @vert_decimate: Vertical decimation factor
* @init_phase_x: Initial scaler phase values for x
* @phase_step_x: Phase step values for x
* @init_phase_y: Initial scaler phase values for y
* @phase_step_y: Phase step values for y
* @preload_x: Horizontal preload value
* @preload_y: Vertical preload value
* @src_width: Source width
* @src_height: Source height
* @dst_width: Destination width
* @dst_height: Destination height
* @y_rgb_filter_cfg: Y/RGB plane filter configuration
* @uv_filter_cfg: UV plane filter configuration
* @alpha_filter_cfg: Alpha filter configuration
* @blend_cfg: Selection of blend coefficients
* @lut_flag: LUT configuration flags
* @dir_lut_idx: 2d 4x4 LUT index
* @y_rgb_cir_lut_idx: Y/RGB circular LUT index
* @uv_cir_lut_idx: UV circular LUT index
* @y_rgb_sep_lut_idx: Y/RGB separable LUT index
* @uv_sep_lut_idx: UV separable LUT index
* @de: Detail enhancer settings
*/
struct dpu_drm_scaler_v2 {
/*
* General definitions
*/
uint32_t enable;
uint32_t dir_en;
/*
* Pix ext settings
*/
struct dpu_drm_pix_ext_v1 pe;
/*
* Decimation settings
*/
uint32_t horz_decimate;
uint32_t vert_decimate;
/*
* Phase settings
*/
int32_t init_phase_x[DPU_MAX_PLANES];
int32_t phase_step_x[DPU_MAX_PLANES];
int32_t init_phase_y[DPU_MAX_PLANES];
int32_t phase_step_y[DPU_MAX_PLANES];
uint32_t preload_x[DPU_MAX_PLANES];
uint32_t preload_y[DPU_MAX_PLANES];
uint32_t src_width[DPU_MAX_PLANES];
uint32_t src_height[DPU_MAX_PLANES];
uint32_t dst_width;
uint32_t dst_height;
uint32_t y_rgb_filter_cfg;
uint32_t uv_filter_cfg;
uint32_t alpha_filter_cfg;
uint32_t blend_cfg;
uint32_t lut_flag;
uint32_t dir_lut_idx;
/* for Y(RGB) and UV planes */
uint32_t y_rgb_cir_lut_idx;
uint32_t uv_cir_lut_idx;
uint32_t y_rgb_sep_lut_idx;
uint32_t uv_sep_lut_idx;
/*
* Detail enhancer settings
*/
struct dpu_drm_de_v1 de;
};
u32 *dpu_hw_util_get_log_mask_ptr(void);
void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
u32 reg_off,
u32 val,
const char *name);
int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
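/*
 * DPU_REG_WRITE stringifies the offset macro so that, when the block's
 * log mask is enabled, the register name is printed alongside the
 * address and value before the relaxed MMIO write is issued.
 */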
#define MISR_FRAME_COUNT_MASK 0xFF
#define MISR_CTRL_ENABLE BIT(8)
#define MISR_CTRL_STATUS BIT(9)
#define MISR_CTRL_STATUS_CLEAR BIT(10)
#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
void *dpu_hw_util_get_dir(void);
void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
const struct dpu_drm_scaler_v2 *scale_v2);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
struct dpu_hw_scaler3_cfg *scaler3_cfg,
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format);
u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
u32 scaler_offset);
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
struct dpu_csc_cfg *data, bool csc10);
uint32_t dpu_copy_formats(
struct dpu_format_extended *dst_list,
uint32_t dst_list_size,
uint32_t dst_list_pos,
const struct dpu_format_extended *src_list,
uint32_t src_list_size);
#endif /* _DPU_HW_UTIL_H */

@@ -0,0 +1,275 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
#define VBIF_CLK_FORCE_CTRL1 0x000C
#define VBIF_QOS_REMAP_00 0x0020
#define VBIF_QOS_REMAP_01 0x0024
#define VBIF_QOS_REMAP_10 0x0028
#define VBIF_QOS_REMAP_11 0x002C
#define VBIF_WRITE_GATHER_EN 0x00AC
#define VBIF_IN_RD_LIM_CONF0 0x00B0
#define VBIF_IN_RD_LIM_CONF1 0x00B4
#define VBIF_IN_RD_LIM_CONF2 0x00B8
#define VBIF_IN_WR_LIM_CONF0 0x00C0
#define VBIF_IN_WR_LIM_CONF1 0x00C4
#define VBIF_IN_WR_LIM_CONF2 0x00C8
#define VBIF_OUT_RD_LIM_CONF0 0x00D0
#define VBIF_OUT_WR_LIM_CONF0 0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
#define VBIF_XIN_PND_ERR 0x0190
#define VBIF_XIN_SRC_ERR 0x0194
#define VBIF_XIN_CLR_ERR 0x019C
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors)
{
struct dpu_hw_blk_reg_map *c;
u32 pnd, src;
if (!vbif)
return;
c = &vbif->hw;
pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
if (pnd_errors)
*pnd_errors = pnd;
if (src_errors)
*src_errors = src;
DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}
static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 value)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_off;
u32 bit_off;
u32 reg_val;
/*
* Assume 4 bits per field and 8 fields per 32-bit register, so at
* most 16 fields across the two registers
*/
if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
return;
c = &vbif->hw;
if (xin_id >= 8) {
xin_id -= 8;
reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
} else {
reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
}
bit_off = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, reg_off);
reg_val &= ~(0x7 << bit_off);
reg_val |= (value & 0x7) << bit_off;
DPU_REG_WRITE(c, reg_off, reg_val);
}
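/*
 * Per-client transaction limits are packed four 8-bit fields per
 * register: e.g. xin_id 5 with rd=true lands in VBIF_IN_RD_LIM_CONF1,
 * bits 15:8.
 */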
static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd, u32 limit)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
u32 reg_off;
u32 bit_off;
if (rd)
reg_off = VBIF_IN_RD_LIM_CONF0;
else
reg_off = VBIF_IN_WR_LIM_CONF0;
reg_off += (xin_id / 4) * 4;
bit_off = (xin_id % 4) * 8;
reg_val = DPU_REG_READ(c, reg_off);
reg_val &= ~(0xFF << bit_off);
reg_val |= (limit) << bit_off;
DPU_REG_WRITE(c, reg_off, reg_val);
}
static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
u32 reg_off;
u32 bit_off;
u32 limit;
if (rd)
reg_off = VBIF_IN_RD_LIM_CONF0;
else
reg_off = VBIF_IN_WR_LIM_CONF0;
reg_off += (xin_id / 4) * 4;
bit_off = (xin_id % 4) * 8;
reg_val = DPU_REG_READ(c, reg_off);
limit = (reg_val >> bit_off) & 0xFF;
return limit;
}
static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
u32 xin_id, bool enable)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
if (enable)
reg_val |= BIT(xin_id);
else
reg_val &= ~BIT(xin_id);
DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
}
static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
u32 xin_id)
{
struct dpu_hw_blk_reg_map *c = &vbif->hw;
u32 reg_val;
reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
return (reg_val & BIT(xin_id)) ? true : false;
}
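/*
 * QoS remap fields are 4 bits wide, eight clients per register;
 * reg_high picks the upper register for xin_ids 8-15 and steps by
 * 8 bytes per priority level, addressing both the RP and LVL banks.
 */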
static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
if (!vbif)
return;
c = &vbif->hw;
reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
reg_shift = (xin_id & 0x7) * 4;
reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
mask = 0x7 << reg_shift;
reg_val &= ~mask;
reg_val |= (remap_level << reg_shift) & mask;
reg_val_lvl &= ~mask;
reg_val_lvl |= (remap_level << reg_shift) & mask;
DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
}
static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
{
struct dpu_hw_blk_reg_map *c;
u32 reg_val;
if (!vbif || xin_id >= MAX_XIN_COUNT)
return;
c = &vbif->hw;
reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
reg_val |= BIT(xin_id);
DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}
static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
unsigned long cap)
{
ops->set_limit_conf = dpu_hw_set_limit_conf;
ops->get_limit_conf = dpu_hw_get_limit_conf;
ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = dpu_hw_set_qos_remap;
ops->set_mem_type = dpu_hw_set_mem_type;
ops->clear_errors = dpu_hw_clear_errors;
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->vbif_count; i++) {
if (vbif == m->vbif[i].id) {
b->base_off = addr;
b->blk_off = m->vbif[i].base;
b->length = m->vbif[i].len;
b->hwversion = m->hwversion;
b->log_mask = DPU_DBG_MASK_VBIF;
return &m->vbif[i];
}
}
return ERR_PTR(-EINVAL);
}
struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_vbif *c;
const struct dpu_vbif_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _top_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}
/*
* Assign ops
*/
c->idx = idx;
c->cap = cfg;
_setup_vbif_ops(&c->ops, c->cap->features);
/* no need to register sub-range in dpu dbg, dump entire vbif io base */
return c;
}
void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
{
kfree(vbif);
}

@@ -0,0 +1,128 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HW_VBIF_H
#define _DPU_HW_VBIF_H
#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
struct dpu_hw_vbif;
/**
* struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct dpu_hw_vbif_ops {
/**
* set_limit_conf - set transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
* @limit: outstanding transaction limit
*/
void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd, u32 limit);
/**
* get_limit_conf - get transaction limit config
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @rd: true for read limit; false for write limit
* @return: outstanding transaction limit
*/
u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
u32 xin_id, bool rd);
/**
* set_halt_ctrl - set halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @enable: halt control enable
*/
void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
u32 xin_id, bool enable);
/**
* get_halt_ctrl - get halt control
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @return: halt control enable
*/
bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
u32 xin_id);
/**
* set_qos_remap - set QoS priority remap
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @level: priority level
* @remap_level: remapped level
*/
void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 level, u32 remap_level);
/**
* set_mem_type - set memory type
* @vbif: vbif context driver
* @xin_id: client interface identifier
* @value: memory type value
*/
void (*set_mem_type)(struct dpu_hw_vbif *vbif,
u32 xin_id, u32 value);
/**
* clear_errors - clear any vbif errors
* This function clears any detected pending/source errors
* on the VBIF interface, and optionally returns the detected
* error mask(s).
* @vbif: vbif context driver
* @pnd_errors: pointer to pending error reporting variable
* @src_errors: pointer to source error reporting variable
*/
void (*clear_errors)(struct dpu_hw_vbif *vbif,
u32 *pnd_errors, u32 *src_errors);
/**
* set_write_gather_en - set write_gather enable
* @vbif: vbif context driver
* @xin_id: client interface identifier
*/
void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
};
struct dpu_hw_vbif {
/* base */
struct dpu_hw_blk_reg_map hw;
/* vbif */
enum dpu_vbif idx;
const struct dpu_vbif_cfg *cap;
/* ops */
struct dpu_hw_vbif_ops ops;
};
/**
* dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDSS
* @m: Pointer to mdss catalog data
*/
struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
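/*
 * Illustrative usage sketch (vbif_io_base, m and xin_id are
 * placeholders for the mapped MDSS base, the catalog pointer and a
 * client interface id):
 *
 *	struct dpu_hw_vbif *vbif;
 *
 *	vbif = dpu_hw_vbif_init(VBIF_RT, vbif_io_base, m);
 *	if (!IS_ERR(vbif) && vbif->ops.set_limit_conf)
 *		vbif->ops.set_limit_conf(vbif, xin_id, true, 0x8);
 */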
#endif /*_DPU_HW_VBIF_H */

@@ -0,0 +1,56 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _DPU_HWIO_H
#define _DPU_HWIO_H
#include "dpu_hw_util.h"
/**
* MDP TOP block register and bit field definitions
*/
#define DISP_INTF_SEL 0x004
#define INTR_EN 0x010
#define INTR_STATUS 0x014
#define INTR_CLEAR 0x018
#define INTR2_EN 0x008
#define INTR2_STATUS 0x00c
#define INTR2_CLEAR 0x02c
#define HIST_INTR_EN 0x01c
#define HIST_INTR_STATUS 0x020
#define HIST_INTR_CLEAR 0x024
#define INTF_INTR_EN 0x1C0
#define INTF_INTR_STATUS 0x1C4
#define INTF_INTR_CLEAR 0x1C8
#define SPLIT_DISPLAY_EN 0x2F4
#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
#define HW_EVENTS_CTL 0x37C
#define CLK_CTRL3 0x3A8
#define CLK_STATUS3 0x3AC
#define CLK_CTRL4 0x3B0
#define CLK_STATUS4 0x3B4
#define CLK_CTRL5 0x3B8
#define CLK_STATUS5 0x3BC
#define CLK_CTRL7 0x3D0
#define CLK_STATUS7 0x3D4
#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
#define INTF_SW_RESET_MASK 0x3FC
#define HDMI_DP_CORE_SELECT 0x408
#define MDP_OUT_CTL_0 0x410
#define MDP_VSYNC_SEL 0x414
#define DCE_SEL 0x450
#endif /*_DPU_HWIO_H */


@@ -0,0 +1,204 @@
/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/clk/clk-conf.h>
#include <linux/err.h>
#include <linux/delay.h>
#include "dpu_io_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
{
int i;
for (i = num_clk - 1; i >= 0; i--) {
if (clk_arry[i].clk)
clk_put(clk_arry[i].clk);
clk_arry[i].clk = NULL;
}
}
int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
{
int i, rc = 0;
for (i = 0; i < num_clk; i++) {
clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
if (rc) {
DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name, rc);
goto error;
}
}
return rc;
error:
for (i--; i >= 0; i--) {
if (clk_arry[i].clk)
clk_put(clk_arry[i].clk);
clk_arry[i].clk = NULL;
}
return rc;
}
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
{
int i, rc = 0;
for (i = 0; i < num_clk; i++) {
if (clk_arry[i].clk) {
if (clk_arry[i].type != DSS_CLK_AHB) {
DEV_DBG("%pS->%s: '%s' rate %ld\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name,
clk_arry[i].rate);
rc = clk_set_rate(clk_arry[i].clk,
clk_arry[i].rate);
if (rc) {
DEV_ERR("%pS->%s: %s failed. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
break;
}
}
} else {
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
rc = -EPERM;
break;
}
}
return rc;
}
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
{
int i, rc = 0;
if (enable) {
for (i = 0; i < num_clk; i++) {
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk) {
rc = clk_prepare_enable(clk_arry[i].clk);
if (rc)
DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
} else {
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
rc = -EPERM;
}
if (rc) {
/* roll back the clocks enabled so far */
msm_dss_enable_clk(clk_arry, i, false);
break;
}
}
} else {
for (i = num_clk - 1; i >= 0; i--) {
DEV_DBG("%pS->%s: disable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk)
clk_disable_unprepare(clk_arry[i].clk);
else
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
}
}
return rc;
}
int msm_dss_parse_clock(struct platform_device *pdev,
struct dss_module_power *mp)
{
int i, rc = 0;
const char *clock_name;
int num_clk = 0;
if (!pdev || !mp)
return -EINVAL;
mp->num_clk = 0;
num_clk = of_property_count_strings(pdev->dev.of_node,
"assigned-clock-names");
if (num_clk <= 0) {
pr_debug("clocks are not defined\n");
return 0;
}
mp->clk_config = devm_kzalloc(&pdev->dev,
sizeof(struct dss_clk) * num_clk,
GFP_KERNEL);
if (!mp->clk_config)
return -ENOMEM;
for (i = 0; i < num_clk; i++) {
rc = of_property_read_string_index(pdev->dev.of_node,
"assigned-clock-names", i,
&clock_name);
if (rc) {
dev_err(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
strlcpy(mp->clk_config[i].clk_name, clock_name,
sizeof(mp->clk_config[i].clk_name));
mp->clk_config[i].type = DSS_CLK_AHB;
}
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
for (i = 0; i < num_clk; i++) {
unsigned long rate = clk_get_rate(mp->clk_config[i].clk);
if (!rate)
continue;
mp->clk_config[i].rate = rate;
mp->clk_config[i].type = DSS_CLK_PCLK;
}
mp->num_clk = num_clk;
return 0;
err:
msm_dss_put_clk(mp->clk_config, num_clk);
return rc;
}


@@ -0,0 +1,57 @@
/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DPU_IO_UTIL_H__
#define __DPU_IO_UTIL_H__
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
struct dss_gpio {
unsigned int gpio;
unsigned int value;
char gpio_name[32];
};
enum dss_clk_type {
DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
DSS_CLK_PCLK,
};
struct dss_clk {
struct clk *clk; /* clk handle */
char clk_name[32];
enum dss_clk_type type;
unsigned long rate;
unsigned long max_rate;
};
struct dss_module_power {
unsigned int num_gpio;
struct dss_gpio *gpio_config;
unsigned int num_clk;
struct dss_clk *clk_config;
};
int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
int msm_dss_parse_clock(struct platform_device *pdev,
struct dss_module_power *mp);
#endif /* __DPU_IO_UTIL_H__ */
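Taken together, these helpers are meant to be called in a fixed sequence at
bring-up: parse the clocks from the device tree, program rates, then enable;
teardown reverses the enable and drops the references. A minimal sketch, with
the surrounding driver context assumed and error handling abbreviated:

#include "dpu_io_util.h"

/* Hypothetical bring-up path: parse, set rates, enable. On any
 * failure, the clock references taken by msm_dss_parse_clock are
 * dropped again.
 */
static int example_clocks_on(struct platform_device *pdev,
                             struct dss_module_power *mp)
{
        int rc;

        rc = msm_dss_parse_clock(pdev, mp);
        if (rc)
                return rc;

        rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
        if (!rc)
                rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc)
                msm_dss_put_clk(mp->clk_config, mp->num_clk);
        return rc;
}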


@@ -0,0 +1,66 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include "dpu_irq.h"
#include "dpu_core_irq.h"
irqreturn_t dpu_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
return dpu_core_irq(dpu_kms);
}
void dpu_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
if (!dpu_kms->dev || !dpu_kms->dev->dev) {
pr_err("invalid device handles\n");
return;
}
dpu_core_irq_preinstall(dpu_kms);
}
int dpu_irq_postinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
if (!kms) {
DPU_ERROR("invalid parameters\n");
return -EINVAL;
}
dpu_kms = to_dpu_kms(kms);
return dpu_core_irq_postinstall(dpu_kms);
}
void dpu_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
if (!kms) {
DPU_ERROR("invalid parameters\n");
return;
}
dpu_kms = to_dpu_kms(kms);
dpu_core_irq_uninstall(dpu_kms);
}


@@ -0,0 +1,59 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DPU_IRQ_H__
#define __DPU_IRQ_H__
#include <linux/kernel.h>
#include <linux/irqdomain.h>
#include "msm_kms.h"
/**
* dpu_irq_controller - define MDSS level interrupt controller context
* @enabled_mask: enable status of MDSS level interrupt
* @domain: interrupt domain of this controller
*/
struct dpu_irq_controller {
unsigned long enabled_mask;
struct irq_domain *domain;
};
/**
* dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
* @kms: pointer to kms context
* @return: none
*/
void dpu_irq_preinstall(struct msm_kms *kms);
/**
* dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
* @kms: pointer to kms context
* @return: 0 if success; error code otherwise
*/
int dpu_irq_postinstall(struct msm_kms *kms);
/**
* dpu_irq_uninstall - uninstall MDSS IRQ handler
* @kms: pointer to kms context
* @return: none
*/
void dpu_irq_uninstall(struct msm_kms *kms);
/**
* dpu_irq - MDSS level IRQ handler
* @kms: pointer to kms context
* @return: interrupt handling status
*/
irqreturn_t dpu_irq(struct msm_kms *kms);
#endif /* __DPU_IRQ_H__ */
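These entry points are intended to be wired into the driver's msm_kms function
table; a sketch of the interrupt-related wiring, assuming the callback names
declared in msm_kms.h:

#include "msm_kms.h"
#include "dpu_irq.h"

/* Only the interrupt callbacks are shown; a real table also carries
 * the mode-setting and vblank hooks.
 */
static const struct msm_kms_funcs example_kms_funcs = {
        .irq_preinstall  = dpu_irq_preinstall,
        .irq_postinstall = dpu_irq_postinstall,
        .irq             = dpu_irq,
        .irq_uninstall   = dpu_irq_uninstall,
};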

File diff suppressed because it is too large


@@ -0,0 +1,402 @@
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DPU_KMS_H__
#define __DPU_KMS_H__
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
#include "dpu_rm.h"
#include "dpu_power_handle.h"
#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
/**
* DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
* @fmt: Pointer to format string
*/
#define DPU_DEBUG(fmt, ...) \
do { \
if (unlikely(drm_debug & DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
/**
* DPU_DEBUG_DRIVER - macro for hardware driver logging
* @fmt: Pointer to format string
*/
#define DPU_DEBUG_DRIVER(fmt, ...) \
do { \
if (unlikely(drm_debug & DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
/**
* ktime_compare_safe - compare two ktime structures
* This macro is similar to the standard ktime_compare() function, but
* attempts to also handle ktime overflows.
* @A: First ktime value
* @B: Second ktime value
* Returns: -1 if A < B, 0 if A == B, 1 if A > B
*/
#define ktime_compare_safe(A, B) \
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
#define DPU_NAME_SIZE 12
/* timeout in frames waiting for frame done */
#define DPU_FRAME_DONE_TIMEOUT 60
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
* @func: intr handler
* @arg: argument for the handler
*/
struct dpu_irq_callback {
struct list_head list;
void (*func)(void *arg, int irq_idx);
void *arg;
};
/**
* struct dpu_irq: IRQ structure contains callback registration info
* @total_irqs: total number of irq_idx obtained from HW interrupts mapping
* @irq_cb_tbl: array of IRQ callback lists, one per irq_idx
* @enable_counts: array of IRQ enable reference counts
* @irq_counts: array of IRQ trigger counts, for statistics
* @cb_lock: callback lock
* @debugfs_file: debugfs file for irq statistics
*/
struct dpu_irq {
u32 total_irqs;
struct list_head *irq_cb_tbl;
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
struct dentry *debugfs_file;
};
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
int core_rev;
struct dpu_mdss_cfg *catalog;
struct dpu_power_handle phandle;
struct dpu_power_client *core_client;
struct dpu_power_event *power_event;
/* directory entry for debugfs */
struct dentry *debugfs_root;
struct dentry *debugfs_danger;
struct dentry *debugfs_vbif;
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
struct regulator *vdd;
struct regulator *mmagic;
struct regulator *venus;
struct dpu_hw_intr *hw_intr;
struct dpu_irq irq_obj;
struct dpu_core_perf perf;
/* saved atomic state during system suspend */
struct drm_atomic_state *suspend_state;
bool suspend_block;
struct dpu_rm rm;
bool rm_init;
struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
struct dpu_hw_mdp *hw_mdp;
bool has_danger_ctrl;
struct platform_device *pdev;
bool rpm_enabled;
struct dss_module_power mp;
};
struct vsync_info {
u32 frame_count;
u32 line_count;
};
#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
/* get struct msm_kms * from drm_device * */
#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
* dpu_kms_is_suspend_state - whether or not the system is pm suspended
* @dev: Pointer to drm device
* Return: Suspend status
*/
static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
{
if (!ddev_to_msm_kms(dev))
return false;
return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
}
/**
* dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
* suspend status
* @dev: Pointer to drm device
* Return: True if commits should be rejected due to pm suspend
*/
static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
{
if (!dpu_kms_is_suspend_state(dev))
return false;
return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
}
/**
* Debugfs functions - extra helper functions for debugfs support
*
* Main debugfs documentation is located at,
*
* Documentation/filesystems/debugfs.txt
*
* @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
* @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
*/
/**
* Companion structure for dpu_debugfs_create_regset32. Do not initialize the
* members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
*/
struct dpu_debugfs_regset32 {
uint32_t offset;
uint32_t blk_len;
struct dpu_kms *dpu_kms;
};
/**
* dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
* This function is meant to initialize dpu_debugfs_regset32 structures for use
* with dpu_debugfs_create_regset32.
* @regset: opaque register definition structure
* @offset: sub-block offset
* @length: sub-block length, in bytes
* @dpu_kms: pointer to dpu kms structure
*/
void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
/**
* dpu_debugfs_create_regset32 - Create register read back file for debugfs
*
* This function is almost identical to the standard debugfs_create_regset32()
* function, with the main difference being that a list of register
* names/offsets do not need to be provided. The 'read' function simply outputs
* sequential register values over a specified range.
*
* Similar to the related debugfs_create_regset32 API, the structure pointed to
* by regset needs to persist for the lifetime of the created file. The calling
* code is responsible for initialization/management of this structure.
*
* The structure pointed to by regset is meant to be opaque. Please use
* dpu_debugfs_setup_regset32 to initialize it.
*
* @name: File name within debugfs
* @mode: File mode within debugfs
* @parent: Parent directory entry within debugfs, can be NULL
* @regset: Pointer to persistent register block definition
*
* Return: dentry pointer for newly created file, use either debugfs_remove()
* or debugfs_remove_recursive() (on a parent directory) to remove the
* file
*/
void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent, struct dpu_debugfs_regset32 *regset);
/**
* dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
*
* The return value should be passed as the 'parent' argument to subsequent
* debugfs create calls.
*
* @dpu_kms: Pointer to DPU's KMS structure
*
* Return: dentry pointer for DPU's debugfs location
*/
void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
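/* Usage sketch for the two helpers above (the file name and register
 * window are hypothetical): expose the first 0x200 bytes of the MDP
 * space as a read-only register dump. The regset must outlive the
 * debugfs file, hence static storage rather than the stack.
 */
static struct dpu_debugfs_regset32 example_regset;

static void example_add_regdump(struct dpu_kms *dpu_kms)
{
        dpu_debugfs_setup_regset32(&example_regset, 0x0, 0x200, dpu_kms);
        dpu_debugfs_create_regset32("top_regs", 0400,
                                    dpu_debugfs_get_root(dpu_kms),
                                    &example_regset);
}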
/**
* DPU info management functions
* These functions/definitions allow for building up a 'dpu_kms_info' structure
* containing one or more "key=value\n" entries.
*/
#define DPU_KMS_INFO_MAX_SIZE 4096
/**
* struct dpu_kms_info - connector information structure container
* @data: Array of information character data
* @len: Current length of information data
* @staged_len: Temporary data buffer length, commit to
* len using dpu_kms_info_stop
* @start: Whether or not a partial data entry was just started
*/
struct dpu_kms_info {
char data[DPU_KMS_INFO_MAX_SIZE];
uint32_t len;
uint32_t staged_len;
bool start;
};
/**
* DPU_KMS_INFO_DATA - Macro for accessing dpu_kms_info data bytes
* @S: Pointer to dpu_kms_info structure
* Returns: Pointer to byte data
*/
#define DPU_KMS_INFO_DATA(S) ((S) ? ((struct dpu_kms_info *)(S))->data : 0)
/**
* DPU_KMS_INFO_DATALEN - Macro for accessing dpu_kms_info data length
* It adds one extra byte to account for the nul terminator.
* @S: Pointer to dpu_kms_info structure
* Returns: Size of available byte data
*/
#define DPU_KMS_INFO_DATALEN(S) ((S) ? ((struct dpu_kms_info *)(S))->len + 1 \
: 0)
/**
* dpu_kms_info_reset - reset dpu_kms_info structure
* @info: Pointer to dpu_kms_info structure
*/
void dpu_kms_info_reset(struct dpu_kms_info *info);
/**
* dpu_kms_info_add_keyint - add integer value to 'dpu_kms_info'
* @info: Pointer to dpu_kms_info structure
* @key: Pointer to key string
* @value: Signed 64-bit integer value
*/
void dpu_kms_info_add_keyint(struct dpu_kms_info *info,
const char *key,
int64_t value);
/**
* dpu_kms_info_add_keystr - add string value to 'dpu_kms_info'
* @info: Pointer to dpu_kms_info structure
* @key: Pointer to key string
* @value: Pointer to string value
*/
void dpu_kms_info_add_keystr(struct dpu_kms_info *info,
const char *key,
const char *value);
/**
* dpu_kms_info_start - begin adding key to 'dpu_kms_info'
* Usage:
* dpu_kms_info_start(key)
* dpu_kms_info_append(val_1)
* ...
* dpu_kms_info_append(val_n)
* dpu_kms_info_stop
* @info: Pointer to dpu_kms_info structure
* @key: Pointer to key string
*/
void dpu_kms_info_start(struct dpu_kms_info *info,
const char *key);
/**
* dpu_kms_info_append - append value string to 'dpu_kms_info'
* Usage:
* dpu_kms_info_start(key)
* dpu_kms_info_append(val_1)
* ...
* dpu_kms_info_append(val_n)
* dpu_kms_info_stop
* @info: Pointer to dpu_kms_info structure
* @str: Pointer to partial value string
*/
void dpu_kms_info_append(struct dpu_kms_info *info,
const char *str);
/**
* dpu_kms_info_append_format - append format code string to 'dpu_kms_info'
* Usage:
* dpu_kms_info_start(key)
* dpu_kms_info_append_format(fourcc, modifier)
* ...
* dpu_kms_info_stop
* @info: Pointer to dpu_kms_info structure
* @pixel_format: FOURCC format code
* @modifier: 64-bit drm format modifier
*/
void dpu_kms_info_append_format(struct dpu_kms_info *info,
uint32_t pixel_format,
uint64_t modifier);
/**
* dpu_kms_info_stop - finish adding key to 'dpu_kms_info'
* Usage:
* dpu_kms_info_start(key)
* dpu_kms_info_append(val_1)
* ...
* dpu_kms_info_append(val_n)
* dpu_kms_info_stop
* @info: Pointer to dpu_kms_info structure
*/
void dpu_kms_info_stop(struct dpu_kms_info *info);
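/* Usage sketch for the info helpers above; keys and values are
 * illustrative. This produces "max_blendstages=4\n" followed by
 * "formats=AR24 XR24\n" in the info buffer.
 */
static void example_fill_info(struct dpu_kms_info *info)
{
        dpu_kms_info_reset(info);
        dpu_kms_info_add_keyint(info, "max_blendstages", 4);
        dpu_kms_info_start(info, "formats");
        dpu_kms_info_append(info, "AR24");
        dpu_kms_info_append(info, " XR24");
        dpu_kms_info_stop(info);
}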
/**
* Vblank enable/disable functions
*/
int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_kms_encoder_enable(struct drm_encoder *encoder);
/**
* dpu_kms_get_clk_rate() - get the clock rate
* @dpu_kms: pointer to dpu_kms structure
* @clock_name: clock name to get the rate
*
* Return: current clock rate
*/
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
#endif /* __DPU_KMS_H__ */


@@ -0,0 +1,153 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "dpu-kms_utils:[%s] " fmt, __func__
#include "dpu_kms.h"
void dpu_kms_info_reset(struct dpu_kms_info *info)
{
if (info) {
info->len = 0;
info->staged_len = 0;
}
}
void dpu_kms_info_add_keyint(struct dpu_kms_info *info,
const char *key,
int64_t value)
{
uint32_t len;
if (info && key) {
len = snprintf(info->data + info->len,
DPU_KMS_INFO_MAX_SIZE - info->len,
"%s=%lld\n",
key,
value);
/* check if snprintf truncated the string */
if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
info->len += len;
}
}
void dpu_kms_info_add_keystr(struct dpu_kms_info *info,
const char *key,
const char *value)
{
uint32_t len;
if (info && key && value) {
len = snprintf(info->data + info->len,
DPU_KMS_INFO_MAX_SIZE - info->len,
"%s=%s\n",
key,
value);
/* check if snprintf truncated the string */
if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
info->len += len;
}
}
void dpu_kms_info_start(struct dpu_kms_info *info,
const char *key)
{
uint32_t len;
if (info && key) {
len = snprintf(info->data + info->len,
DPU_KMS_INFO_MAX_SIZE - info->len,
"%s=",
key);
info->start = true;
/* check if snprintf truncated the string */
if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
info->staged_len = info->len + len;
}
}
void dpu_kms_info_append(struct dpu_kms_info *info,
const char *str)
{
uint32_t len;
if (info) {
len = snprintf(info->data + info->staged_len,
DPU_KMS_INFO_MAX_SIZE - info->staged_len,
"%s",
str);
/* check if snprintf truncated the string */
if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE) {
info->staged_len += len;
info->start = false;
}
}
}
void dpu_kms_info_append_format(struct dpu_kms_info *info,
uint32_t pixel_format,
uint64_t modifier)
{
uint32_t len;
if (!info)
return;
if (modifier) {
len = snprintf(info->data + info->staged_len,
DPU_KMS_INFO_MAX_SIZE - info->staged_len,
info->start ?
"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
(pixel_format >> 0) & 0xFF,
(pixel_format >> 8) & 0xFF,
(pixel_format >> 16) & 0xFF,
(pixel_format >> 24) & 0xFF,
(modifier >> 56) & 0xFF,
modifier & ((1ULL << 56) - 1));
} else {
len = snprintf(info->data + info->staged_len,
DPU_KMS_INFO_MAX_SIZE - info->staged_len,
info->start ?
"%c%c%c%c" : " %c%c%c%c",
(pixel_format >> 0) & 0xFF,
(pixel_format >> 8) & 0xFF,
(pixel_format >> 16) & 0xFF,
(pixel_format >> 24) & 0xFF);
}
/* check if snprintf truncated the string */
if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE) {
info->staged_len += len;
info->start = false;
}
}
void dpu_kms_info_stop(struct dpu_kms_info *info)
{
uint32_t len;
if (info) {
/* insert final delimiter */
len = snprintf(info->data + info->staged_len,
DPU_KMS_INFO_MAX_SIZE - info->staged_len,
"\n");
/* check if snprintf truncated the string */
if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE)
info->len = info->staged_len + len;
}
}


@@ -0,0 +1,245 @@
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include "dpu_kms.h"
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
#define HW_INTR_STATUS 0x0010
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
unsigned long mmio_len;
u32 hwversion;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
};
static irqreturn_t dpu_mdss_irq(int irq, void *arg)
{
struct dpu_mdss *dpu_mdss = arg;
u32 interrupts;
interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
unsigned int mapping;
int rc;
mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
hwirq);
if (mapping == 0) {
DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
return IRQ_NONE;
}
rc = generic_handle_irq(mapping);
if (rc < 0) {
DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
hwirq, mapping, rc);
return IRQ_NONE;
}
interrupts &= ~(1 << hwirq);
}
return IRQ_HANDLED;
}
static void dpu_mdss_irq_mask(struct irq_data *irqd)
{
struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static void dpu_mdss_irq_unmask(struct irq_data *irqd)
{
struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static struct irq_chip dpu_mdss_irq_chip = {
.name = "dpu_mdss",
.irq_mask = dpu_mdss_irq_mask,
.irq_unmask = dpu_mdss_irq_unmask,
};
static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct dpu_mdss *dpu_mdss = domain->host_data;
int ret;
irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
ret = irq_set_chip_data(irq, dpu_mdss);
return ret;
}
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
.map = dpu_mdss_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
{
struct device *dev;
struct irq_domain *domain;
dev = dpu_mdss->base.dev->dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&dpu_mdss_irqdomain_ops, dpu_mdss);
if (!domain) {
DPU_ERROR("failed to add irq_domain\n");
return -EINVAL;
}
dpu_mdss->irq_controller.enabled_mask = 0;
dpu_mdss->irq_controller.domain = domain;
return 0;
}
static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (ret)
DPU_ERROR("clock enable failed, ret:%d\n", ret);
return ret;
}
static int dpu_mdss_disable(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (ret)
DPU_ERROR("clock disable failed, ret:%d\n", ret);
return ret;
}
static void dpu_mdss_destroy(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
_dpu_mdss_irq_domain_fini(dpu_mdss);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}
static const struct msm_mdss_funcs mdss_funcs = {
.enable = dpu_mdss_enable,
.disable = dpu_mdss_disable,
.destroy = dpu_mdss_destroy,
};
int dpu_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct resource *res;
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret = 0;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
return -ENOMEM;
dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
if (IS_ERR(dpu_mdss->mmio))
return PTR_ERR(dpu_mdss->mmio);
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
if (!res) {
DRM_ERROR("failed to get memory resource for mdss\n");
return -ENOMEM;
}
dpu_mdss->mmio_len = resource_size(res);
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
goto clk_parse_err;
}
dpu_mdss->base.dev = dev;
dpu_mdss->base.funcs = &mdss_funcs;
ret = _dpu_mdss_irq_domain_add(dpu_mdss);
if (ret)
goto irq_domain_error;
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
if (ret) {
DPU_ERROR("failed to init irq: %d\n", ret);
goto irq_error;
}
pm_runtime_enable(dev->dev);
pm_runtime_get_sync(dev->dev);
dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
pm_runtime_put_sync(dev->dev);
priv->mdss = &dpu_mdss->base;
return ret;
irq_error:
_dpu_mdss_irq_domain_fini(dpu_mdss);
irq_domain_error:
msm_dss_put_clk(mp->clk_config, mp->num_clk);
clk_parse_err:
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
return ret;
}

File diff suppressed because it is too large


@@ -0,0 +1,175 @@
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _DPU_PLANE_H_
#define _DPU_PLANE_H_
#include <drm/drm_crtc.h>
#include "dpu_kms.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_sspp.h"
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
* @aspace: pointer to address space for input/output buffers
* @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
* @cdp_cfg: CDP configuration
*/
struct dpu_plane_state {
struct drm_plane_state base;
struct msm_gem_address_space *aspace;
void *input_fence;
enum dpu_stage stage;
uint32_t multirect_index;
uint32_t multirect_mode;
bool pending;
/* scaler configuration */
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
struct dpu_hw_pipe_cdp_cfg cdp_cfg;
};
/**
* struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
* @r0: drm plane configured on rect 0
* @r1: drm plane configured on rect 1
*/
struct dpu_multirect_plane_states {
const struct drm_plane_state *r0;
const struct drm_plane_state *r1;
};
#define to_dpu_plane_state(x) \
container_of(x, struct dpu_plane_state, base)
/**
* dpu_plane_pipe - return sspp identifier for the given plane
* @plane: Pointer to DRM plane object
* Returns: sspp identifier of the given plane
*/
enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
/**
* is_dpu_plane_virtual - check for virtual plane
* @plane: Pointer to DRM plane object
* returns: true - if the plane is virtual
* false - if the plane is primary
*/
bool is_dpu_plane_virtual(struct drm_plane *plane);
/**
* dpu_plane_get_ctl_flush - get control flush mask
* @plane: Pointer to DRM plane object
* @ctl: Pointer to control hardware
* @flush_sspp: Pointer to sspp flush control word
*/
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp);
/**
* dpu_plane_restore - restore hw state if previously power collapsed
* @plane: Pointer to drm plane structure
*/
void dpu_plane_restore(struct drm_plane *plane);
/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
void dpu_plane_flush(struct drm_plane *plane);
/**
* dpu_plane_kickoff - final plane operations before commit kickoff
* @plane: Pointer to drm plane structure
*/
void dpu_plane_kickoff(struct drm_plane *plane);
/**
* dpu_plane_set_error: enable/disable error condition
* @plane: pointer to drm_plane structure
*/
void dpu_plane_set_error(struct drm_plane *plane, bool error);
/**
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
* @primary_plane: true if this pipe is primary plane for crtc
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
* id will be passed for a virtual pipe initialization.
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
unsigned long possible_crtcs, u32 master_plane_id);
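/* Usage sketch: create a primary plane on a ViG pipe, then a virtual
 * plane sharing the same SSPP via multirect by passing the primary
 * plane's DRM object id as master_plane_id. SSPP_VIG0 and the crtc
 * mask 0x1 are illustrative.
 */
static int example_create_plane_pair(struct drm_device *dev)
{
        struct drm_plane *primary, *virt;

        primary = dpu_plane_init(dev, SSPP_VIG0, true, 0x1, 0);
        if (IS_ERR(primary))
                return PTR_ERR(primary);
        virt = dpu_plane_init(dev, SSPP_VIG0, false, 0x1, primary->base.id);
        return PTR_ERR_OR_ZERO(virt);
}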
/**
* dpu_plane_validate_multirect_v2 - validate the multirect planes
* against hw limitations
* @plane: drm plane states of the multirect pair
*/
int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
/**
* dpu_plane_clear_multirect - clear multirect bits for the given pipe
* @drm_state: Pointer to DRM plane state
*/
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
/**
* dpu_plane_wait_input_fence - wait for input fence object
* @plane: Pointer to DRM plane object
* @wait_ms: Wait timeout value
* Returns: Zero on success
*/
int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
/**
* dpu_plane_color_fill - enables color fill on plane
* @plane: Pointer to DRM plane object
* @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
* @alpha: 8-bit fill alpha value, 255 selects 100% alpha
* Returns: 0 on success
*/
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
/**
* dpu_plane_set_revalidate - sets revalidate flag which forces a full
* validation of the plane properties in the next atomic check
* @plane: Pointer to DRM plane object
* @enable: Boolean to set/unset the flag
*/
void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
#endif /* _DPU_PLANE_H_ */


@@ -0,0 +1,249 @@
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include "dpu_power_handle.h"
#include "dpu_trace.h"
static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
};
const char *dpu_power_handle_get_dbus_name(u32 bus_id)
{
if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
return data_bus_name[bus_id];
return NULL;
}
static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
u32 event_type)
{
struct dpu_power_event *event;
list_for_each_entry(event, &phandle->event_list, list) {
if (event->event_type & event_type)
event->cb_fnc(event_type, event->usr);
}
}
struct dpu_power_client *dpu_power_client_create(
struct dpu_power_handle *phandle, char *client_name)
{
struct dpu_power_client *client;
static u32 id;
if (!client_name || !phandle) {
pr_err("client name is null or invalid power data\n");
return ERR_PTR(-EINVAL);
}
client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
mutex_lock(&phandle->phandle_lock);
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
client->active = true;
pr_debug("client %s created:%pK id :%d\n", client_name,
client, id);
id++;
list_add(&client->list, &phandle->power_client_clist);
mutex_unlock(&phandle->phandle_lock);
return client;
}
void dpu_power_client_destroy(struct dpu_power_handle *phandle,
struct dpu_power_client *client)
{
if (!client || !phandle) {
pr_err("reg bus vote: invalid client handle\n");
} else if (!client->active) {
pr_err("dpu power deinit already done\n");
kfree(client);
} else {
pr_debug("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
mutex_lock(&phandle->phandle_lock);
list_del_init(&client->list);
mutex_unlock(&phandle->phandle_lock);
kfree(client);
}
}
void dpu_power_resource_init(struct platform_device *pdev,
struct dpu_power_handle *phandle)
{
phandle->dev = &pdev->dev;
INIT_LIST_HEAD(&phandle->power_client_clist);
INIT_LIST_HEAD(&phandle->event_list);
mutex_init(&phandle->phandle_lock);
}
void dpu_power_resource_deinit(struct platform_device *pdev,
struct dpu_power_handle *phandle)
{
struct dpu_power_client *curr_client, *next_client;
struct dpu_power_event *curr_event, *next_event;
if (!phandle || !pdev) {
pr_err("invalid input param\n");
return;
}
mutex_lock(&phandle->phandle_lock);
list_for_each_entry_safe(curr_client, next_client,
&phandle->power_client_clist, list) {
pr_err("cliend:%s-%d still registered with refcount:%d\n",
curr_client->name, curr_client->id,
curr_client->refcount);
curr_client->active = false;
list_del(&curr_client->list);
}
list_for_each_entry_safe(curr_event, next_event,
&phandle->event_list, list) {
pr_err("event:%d, client:%s still registered\n",
curr_event->event_type,
curr_event->client_name);
curr_event->active = false;
list_del(&curr_event->list);
}
mutex_unlock(&phandle->phandle_lock);
}
int dpu_power_resource_enable(struct dpu_power_handle *phandle,
struct dpu_power_client *pclient, bool enable)
{
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct dpu_power_client *client;
if (!phandle || !pclient) {
pr_err("invalid input argument\n");
return -EINVAL;
}
mutex_lock(&phandle->phandle_lock);
if (enable)
pclient->refcount++;
else if (pclient->refcount)
pclient->refcount--;
if (pclient->refcount)
pclient->usecase_ndx = VOTE_INDEX_LOW;
else
pclient->usecase_ndx = VOTE_INDEX_DISABLE;
list_for_each_entry(client, &phandle->power_client_clist, list) {
if (client->usecase_ndx < VOTE_INDEX_MAX &&
client->usecase_ndx > max_usecase_ndx)
max_usecase_ndx = client->usecase_ndx;
}
if (phandle->current_usecase_ndx != max_usecase_ndx) {
changed = true;
prev_usecase_ndx = phandle->current_usecase_ndx;
phandle->current_usecase_ndx = max_usecase_ndx;
}
pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
__builtin_return_address(0), changed, max_usecase_ndx,
pclient->name, pclient->id, enable, pclient->refcount);
if (!changed)
goto end;
if (enable) {
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_PRE_ENABLE);
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_POST_ENABLE);
} else {
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_PRE_DISABLE);
dpu_power_event_trigger_locked(phandle,
DPU_POWER_EVENT_POST_DISABLE);
}
end:
mutex_unlock(&phandle->phandle_lock);
return 0;
}
struct dpu_power_event *dpu_power_handle_register_event(
struct dpu_power_handle *phandle,
u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
void *usr, char *client_name)
{
struct dpu_power_event *event;
if (!phandle) {
pr_err("invalid power handle\n");
return ERR_PTR(-EINVAL);
} else if (!cb_fnc || !event_type) {
pr_err("no callback fnc or event type\n");
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
event->event_type = event_type;
event->cb_fnc = cb_fnc;
event->usr = usr;
strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
event->active = true;
mutex_lock(&phandle->phandle_lock);
list_add(&event->list, &phandle->event_list);
mutex_unlock(&phandle->phandle_lock);
return event;
}
void dpu_power_handle_unregister_event(
struct dpu_power_handle *phandle,
struct dpu_power_event *event)
{
if (!phandle || !event) {
pr_err("invalid phandle or event\n");
} else if (!event->active) {
pr_err("power handle deinit already done\n");
kfree(event);
} else {
mutex_lock(&phandle->phandle_lock);
list_del_init(&event->list);
mutex_unlock(&phandle->phandle_lock);
kfree(event);
}
}


@@ -0,0 +1,225 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _DPU_POWER_HANDLE_H_
#define _DPU_POWER_HANDLE_H_
#define MAX_CLIENT_NAME_LEN 128
#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
#include "dpu_io_util.h"
/* event will be triggered before power handler disable */
#define DPU_POWER_EVENT_PRE_DISABLE 0x1
/* event will be triggered after power handler disable */
#define DPU_POWER_EVENT_POST_DISABLE 0x2
/* event will be triggered before power handler enable */
#define DPU_POWER_EVENT_PRE_ENABLE 0x4
/* event will be triggered after power handler enable */
#define DPU_POWER_EVENT_POST_ENABLE 0x8
/**
* mdss_bus_vote_type: register bus vote type
* VOTE_INDEX_DISABLE: removes the client vote
* VOTE_INDEX_LOW: keeps the lowest vote for register bus
* VOTE_INDEX_MAX: invalid
*/
enum mdss_bus_vote_type {
VOTE_INDEX_DISABLE,
VOTE_INDEX_LOW,
VOTE_INDEX_MAX,
};
/**
* enum dpu_power_handle_data_bus_client - type of axi bus clients
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
* @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
*/
enum dpu_power_handle_data_bus_client {
DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
};
/**
* enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
* @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
* @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
* @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
*/
enum DPU_POWER_HANDLE_DBUS_ID {
DPU_POWER_HANDLE_DBUS_ID_MNOC,
DPU_POWER_HANDLE_DBUS_ID_LLCC,
DPU_POWER_HANDLE_DBUS_ID_EBI,
DPU_POWER_HANDLE_DBUS_ID_MAX,
};
/**
* struct dpu_power_client: stores the power client for dpu driver
* @name: name of the client
* @usecase_ndx: current regs bus vote type
* @refcount: current refcount if multiple modules are using the
* same client for enable/disable. Power module will
* aggregate the refcount and vote accordingly for this
* client.
* @id: assigned during create. helps for debugging.
* @list: list to attach power handle master list
* @ab: arbitrated bandwidth for each bus client
* @ib: instantaneous bandwidth for each bus client
* @active: indicates the state of dpu power handle
*/
struct dpu_power_client {
char name[MAX_CLIENT_NAME_LEN];
short usecase_ndx;
short refcount;
u32 id;
struct list_head list;
u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
bool active;
};
/*
* struct dpu_power_event - local event registration structure
* @client_name: name of the client registering
* @cb_fnc: pointer to desired callback function
* @usr: user pointer to pass to callback event trigger
* @event_type: refer to DPU_POWER_EVENT_*
* @list: list to attach event master list
* @active: indicates the state of dpu power handle
*/
struct dpu_power_event {
char client_name[MAX_CLIENT_NAME_LEN];
void (*cb_fnc)(u32 event_type, void *usr);
void *usr;
u32 event_type;
struct list_head list;
bool active;
};
/**
* struct dpu_power_handle: power handle main struct
* @client_clist: master list to store all clients
* @phandle_lock: lock to synchronize the enable/disable
* @dev: pointer to device structure
* @usecase_ndx: current usecase index
* @event_list: current power handle event list
*/
struct dpu_power_handle {
struct list_head power_client_clist;
struct mutex phandle_lock;
struct device *dev;
u32 current_usecase_ndx;
struct list_head event_list;
};
/**
* dpu_power_resource_init() - initializes the dpu power handle
* @pdev: platform device to search the power resources
* @pdata: power handle to store the power resources
*/
void dpu_power_resource_init(struct platform_device *pdev,
struct dpu_power_handle *pdata);
/**
* dpu_power_resource_deinit() - release the dpu power handle
* @pdev: platform device for power resources
* @pdata: power handle containing the resources
*
* Return: none
*/
void dpu_power_resource_deinit(struct platform_device *pdev,
struct dpu_power_handle *pdata);
/**
* dpu_power_client_create() - create the client on power handle
* @pdata: power handle containing the resources
* @client_name: new client name for registration
*
* Return: pointer to new power client on success; ERR_PTR otherwise
*/
struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
char *client_name);
/**
* dpu_power_client_destroy() - destroy the client on power handle
* @phandle: power handle containing the resources
* @client: power client handle to be destroyed
*
* Return: none
*/
void dpu_power_client_destroy(struct dpu_power_handle *phandle,
struct dpu_power_client *client);
/**
* dpu_power_resource_enable() - enable/disable the power resources
* @pdata: power handle containing the resources
* @client: client information to enable/disable its vote
* @enable: boolean request for enable/disable
*
* Return: error code.
*/
int dpu_power_resource_enable(struct dpu_power_handle *pdata,
struct dpu_power_client *pclient, bool enable);
/**
* dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
* @phandle: power handle containing the resources
* @client: client information to bandwidth control
* @enable: true to enable bandwidth for data bus
*
* Return: none
*/
void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
struct dpu_power_client *pclient, int enable);
/**
* dpu_power_handle_register_event - register a callback function for an event.
* Clients can register for multiple events with a single register.
* Any block with access to phandle can register for the event
* notification.
* @phandle: power handle containing the resources
* @event_type: event type to register; refer DPU_POWER_EVENT_*
* @cb_fnc: pointer to desired callback function
* @usr: user pointer to pass to callback on event trigger
*
* Return: event pointer if success, or error code otherwise
*/
struct dpu_power_event *dpu_power_handle_register_event(
struct dpu_power_handle *phandle,
u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
void *usr, char *client_name);
/**
* dpu_power_handle_unregister_event - unregister callback for event(s)
* @phandle: power handle containing the resources
* @event: event pointer returned after power handle register
*/
void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
struct dpu_power_event *event);
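/* Usage sketch: register one callback for both the post-enable and
 * pre-disable events; the client name is illustrative.
 */
static void example_power_cb(u32 event_type, void *usr)
{
        pr_debug("power event 0x%x\n", event_type);
}

static struct dpu_power_event *example_register(struct dpu_power_handle *ph)
{
        return dpu_power_handle_register_event(ph,
                        DPU_POWER_EVENT_POST_ENABLE |
                        DPU_POWER_EVENT_PRE_DISABLE,
                        example_power_cb, NULL, "example_client");
}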
/**
* dpu_power_handle_get_dbus_name - get name of given data bus identifier
* @bus_id: data bus identifier
* Return: Pointer to name string if success; NULL otherwise
*/
const char *dpu_power_handle_get_dbus_name(u32 bus_id);
#endif /* _DPU_POWER_HANDLE_H_ */

File diff suppressed because it is too large


@@ -0,0 +1,199 @@
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __DPU_RM_H__
#define __DPU_RM_H__
#include <linux/list.h>
#include "msm_kms.h"
#include "dpu_hw_top.h"
/**
* enum dpu_rm_topology_name - HW resource use case in use by connector
* @DPU_RM_TOPOLOGY_NONE: No topology in use currently
* @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
* @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
*/
enum dpu_rm_topology_name {
DPU_RM_TOPOLOGY_NONE = 0,
DPU_RM_TOPOLOGY_SINGLEPIPE,
DPU_RM_TOPOLOGY_DUALPIPE,
DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
DPU_RM_TOPOLOGY_MAX,
};
/**
* enum dpu_rm_topology_control - HW resource use case in use by connector
* @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
* test, reserve the resources for this display.
* Normal behavior would not impact the reservation
* list during the AtomicTest phase.
* @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
* release any reservation held by this display.
* Normal behavior would not impact the
* reservation list during the AtomicTest phase.
* @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
*/
enum dpu_rm_topology_control {
DPU_RM_TOPCTL_RESERVE_LOCK,
DPU_RM_TOPCTL_RESERVE_CLEAR,
DPU_RM_TOPCTL_DS,
};
/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
* @rsvps: list of hardware reservations by each crtc->encoder->connector
* @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
struct drm_device *dev;
struct list_head rsvps;
struct list_head hw_blks[DPU_HW_BLK_MAX];
struct dpu_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
struct mutex rm_lock;
};
/**
* struct dpu_rm_hw_blk - resource manager internal structure
* forward declaration for single iterator definition without void pointer
*/
struct dpu_rm_hw_blk;
/**
* struct dpu_rm_hw_iter - iterator for use with dpu_rm
* @hw: dpu_hw object requested, or NULL on failure
* @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
* @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
* @type: Hardware Block Type client wishes to search for.
*/
struct dpu_rm_hw_iter {
void *hw;
struct dpu_rm_hw_blk *blk;
uint32_t enc_id;
enum dpu_hw_blk_type type;
};
/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP
* @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
void *mmio,
struct drm_device *dev);
/**
* dpu_rm_destroy - Free all memory allocated by dpu_rm_init
* @rm: DPU Resource Manager handle
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_destroy(struct dpu_rm *rm);
/**
* dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
* the use connections and user requirements, specified through related
* topology control properties, and reserve hardware blocks to that
* display chain.
* HW blocks can then be accessed through dpu_rm_get_* functions.
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
* @conn_state: Proposed Atomic DRM Connector State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only);
/**
* dpu_rm_release - Given the encoder for the display chain, release any
* HW blocks previously reserved for that use case.
* @rm: DPU Resource Manager handle
* @enc: DRM Encoder handle
* @Return: none
*/
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
/**
* dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
* This is never reserved, and is usable by any display.
* @rm: DPU Resource Manager handle
* @Return: Pointer to hw block or NULL
*/
struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw
* @iter: iter object to initialize
* @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
* @type: Hardware Block Type client wishes to search for.
*/
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
enum dpu_hw_blk_type type);
/**
* dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
* Meant to do a single pass through the hardware list to iteratively
* retrieve hardware blocks of a given type for a given encoder.
* Initialize an iterator object.
* Set hw block type of interest. Set encoder id of interest, 0 for any.
* Function returns first hw of type for that encoder.
* Subsequent calls will return the next reserved hw of that type in-order.
* Iterator HW pointer will be null on failure to find hw.
* @rm: DPU Resource Manager handle
* @iter: iterator object
* @Return: true on match found, false on no match found
*/
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
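/* Usage sketch of the single-pass pattern described above: walk every
 * layer mixer reserved for one encoder (DPU_HW_BLK_LM being the mixer
 * entry of enum dpu_hw_blk_type).
 */
static void example_for_each_lm(struct dpu_rm *rm, uint32_t enc_id)
{
        struct dpu_rm_hw_iter iter;

        dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_LM);
        while (dpu_rm_get_hw(rm, &iter)) {
                struct dpu_hw_mixer *lm = iter.hw;

                /* program or inspect the reserved mixer here */
                (void)lm;
        }
}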
/**
* dpu_rm_check_property_topctl - validate property bitmask before it is set
* @val: user's proposed topology control bitmask
* @Return: 0 on success or error
*/
int dpu_rm_check_property_topctl(uint64_t val);
/**
* dpu_rm_get_topology_name - returns the name of the given topology
* definition
* @topology: topology definition
* @Return: name of the topology
*/
enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology);
#endif /* __DPU_RM_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,384 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
* @xin_id: Client interface identifier
* @return: 0 if success; error code otherwise
*/
static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
{
ktime_t timeout;
bool status;
int rc;
if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
return -EINVAL;
}
timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
for (;;) {
status = vbif->ops.get_halt_ctrl(vbif, xin_id);
if (status)
break;
if (ktime_compare_safe(ktime_get(), timeout) > 0) {
status = vbif->ops.get_halt_ctrl(vbif, xin_id);
break;
}
usleep_range(501, 1000);
}
if (!status) {
rc = -ETIMEDOUT;
DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
vbif->idx - VBIF_0, xin_id);
} else {
rc = 0;
DPU_DEBUG("VBIF %d client %d is halted\n",
vbif->idx - VBIF_0, xin_id);
}
return rc;
}
/**
* _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
* @vbif: Pointer to hardware vbif driver
* @ot_lim: Pointer to OT limit to be modified
* @params: Pointer to usecase parameters
*/
static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
{
u64 pps;
const struct dpu_vbif_dynamic_ot_tbl *tbl;
u32 i;
if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
return;
/* Dynamic OT setting done only for WFD */
if (!params->is_wfd)
return;
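/*
* pps is the pixel rate of the surface: e.g. a 1920x1080 surface
* refreshed at 60 fps works out to ~124.4 million pixels per second.
*/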
pps = params->frame_rate;
pps *= params->width;
pps *= params->height;
tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
&vbif->cap->dynamic_ot_wr_tbl;
for (i = 0; i < tbl->count; i++) {
if (pps <= tbl->cfg[i].pps) {
*ot_lim = tbl->cfg[i].ot_limit;
break;
}
}
DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
vbif->idx - VBIF_0, params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
/**
* _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
* @vbif: Pointer to hardware vbif driver
* @params: Pointer to usecase parameters
* @return: OT limit
*/
static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
struct dpu_vbif_set_ot_params *params)
{
u32 ot_lim = 0;
u32 val;
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
/* return 0 so the caller leaves the OT configuration untouched */
return 0;
}
if (vbif->cap->default_ot_wr_limit && !params->rd)
ot_lim = vbif->cap->default_ot_wr_limit;
else if (vbif->cap->default_ot_rd_limit && params->rd)
ot_lim = vbif->cap->default_ot_rd_limit;
/*
* If default ot is not set from dt/catalog,
* then do not configure it.
*/
if (ot_lim == 0)
goto exit;
/* Modify the limits if the target and the use case requires it */
_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
if (vbif && vbif->ops.get_limit_conf) {
val = vbif->ops.get_limit_conf(vbif,
params->xin_id, params->rd);
if (val == ot_lim)
ot_lim = 0;
}
exit:
DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
vbif->idx - VBIF_0, params->xin_id, ot_lim);
return ot_lim;
}
/**
* dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
* @dpu_kms: DPU handler
* @params: Pointer to usecase parameters
*
* Note: this function may block while waiting for the bus to halt.
*/
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
struct dpu_hw_vbif *vbif = NULL;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
int ret, i;
if (!dpu_kms || !params) {
DPU_ERROR("invalid arguments\n");
return;
}
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i] &&
dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
vbif = dpu_kms->hw_vbif[i];
}
if (!vbif || !mdp) {
DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
return;
}
if (!mdp->ops.setup_clk_force_ctrl ||
!vbif->ops.set_limit_conf ||
!vbif->ops.set_halt_ctrl)
return;
/* set write_gather_en for all write clients */
if (vbif->ops.set_write_gather_en && !params->rd)
vbif->ops.set_write_gather_en(vbif, params->xin_id);
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
goto exit;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
if (ret)
trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
exit:
return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
struct dpu_hw_vbif *vbif = NULL;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
int i;
if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
DPU_ERROR("invalid arguments\n");
return;
}
mdp = dpu_kms->hw_mdp;
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
if (dpu_kms->hw_vbif[i] &&
dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
vbif = dpu_kms->hw_vbif[i];
break;
}
}
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
return;
}
if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
DPU_DEBUG("qos remap not supported\n");
return;
}
qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
&vbif->cap->qos_nrt_tbl;
if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
DPU_DEBUG("qos tbl not defined\n");
return;
}
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
params->vbif_idx, params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
}
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
}
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
u32 i, pnd, src;
if (!dpu_kms) {
DPU_ERROR("invalid argument\n");
return;
}
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
vbif->idx - VBIF_0, pnd, src);
}
}
}
}
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
int i, j;
if (!dpu_kms) {
DPU_ERROR("invalid argument\n");
return;
}
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->cap && vbif->ops.set_mem_type) {
for (j = 0; j < vbif->cap->memtype_count; j++)
vbif->ops.set_mem_type(
vbif, j, vbif->cap->memtype[j]);
}
}
}
#ifdef CONFIG_DEBUG_FS
void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
{
debugfs_remove_recursive(dpu_kms->debugfs_vbif);
dpu_kms->debugfs_vbif = NULL;
}
int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
struct dentry *debugfs_vbif;
int i, j;
dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
if (!dpu_kms->debugfs_vbif) {
DPU_ERROR("failed to create vbif debugfs\n");
return -EINVAL;
}
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
debugfs_vbif = debugfs_create_dir(vbif_name,
dpu_kms->debugfs_vbif);
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
(u32 *)&vbif->xin_halt_timeout);
debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
(u32 *)&vbif->default_ot_rd_limit);
debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
(u32 *)&vbif->default_ot_wr_limit);
for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_rd_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_rd_%d_pps", j);
debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
(u64 *)&cfg->pps);
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_rd_%d_ot_limit", j);
debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
(u32 *)&cfg->ot_limit);
}
for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
struct dpu_vbif_dynamic_ot_cfg *cfg =
&vbif->dynamic_ot_wr_tbl.cfg[j];
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_wr_%d_pps", j);
debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
(u64 *)&cfg->pps);
snprintf(vbif_name, sizeof(vbif_name),
"dynamic_ot_wr_%d_ot_limit", j);
debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
(u32 *)&cfg->ot_limit);
}
}
return 0;
}
#endif

drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h

@@ -0,0 +1,94 @@
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DPU_VBIF_H__
#define __DPU_VBIF_H__
#include "dpu_kms.h"
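/**
* struct dpu_vbif_set_ot_params - OT (outstanding transactions) limit
* configuration for a vbif client; field descriptions below are inferred
* from how dpu_vbif.c consumes them
* @xin_id: client interface identifier
* @num: pipe identifier (debug only)
* @width: source surface width in pixels
* @height: source surface height in pixels
* @frame_rate: surface refresh rate in frames per second
* @rd: true for a read (fetch) client, false for a write client
* @is_wfd: true if the client belongs to a WFD (writeback) use case
* @vbif_idx: vbif identifier
* @clk_ctrl: clock control identifier of the xin
*/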
struct dpu_vbif_set_ot_params {
u32 xin_id;
u32 num;
u32 width;
u32 height;
u32 frame_rate;
bool rd;
bool is_wfd;
u32 vbif_idx;
u32 clk_ctrl;
};
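/**
* struct dpu_vbif_set_memtype_params - memory type configuration for a
* vbif client; field descriptions below are inferred from usage
* @xin_id: client interface identifier
* @vbif_idx: vbif identifier
* @clk_ctrl: clock control identifier of the xin
* @is_cacheable: true if the client's transactions may be cached
*/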
struct dpu_vbif_set_memtype_params {
u32 xin_id;
u32 vbif_idx;
u32 clk_ctrl;
bool is_cacheable;
};
/**
* struct dpu_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
* @xin_id: client interface identifier
* @clk_ctrl: clock control identifier of the xin
* @num: pipe identifier (debug only)
* @is_rt: true if pipe is used in real-time use case
*/
struct dpu_vbif_set_qos_params {
u32 vbif_idx;
u32 xin_id;
u32 clk_ctrl;
u32 num;
bool is_rt;
};
/**
* dpu_vbif_set_ot_limit - set OT limit for vbif client
* @dpu_kms: DPU handler
* @params: Pointer to OT configuration parameters
*/
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params);
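/*
* Illustrative sketch (not part of this file): a read client updating its
* OT limit after an atomic update. The sspp/crtc variables and src_w/src_h
* are assumptions for the example.
*
*	struct dpu_vbif_set_ot_params ot_params = {
*		.xin_id = sspp->cap->xin_id,
*		.num = sspp->idx - SSPP_NONE,
*		.width = src_w,
*		.height = src_h,
*		.frame_rate = drm_mode_vrefresh(&crtc->mode),
*		.rd = true,
*		.is_wfd = false,
*		.vbif_idx = VBIF_RT,
*		.clk_ctrl = sspp->cap->clk_ctrl,
*	};
*
*	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
*/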
/**
* dpu_vbif_set_qos_remap - set QoS priority level remap
* @dpu_kms: DPU handler
* @params: Pointer to QoS configuration parameters
*/
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params);
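/*
* Illustrative sketch (not part of this file): remapping QoS priorities
* for a real-time pipe. The sspp variable and chosen values are
* assumptions for the example.
*
*	struct dpu_vbif_set_qos_params qos_params = {
*		.vbif_idx = VBIF_RT,
*		.clk_ctrl = sspp->cap->clk_ctrl,
*		.xin_id = sspp->cap->xin_id,
*		.num = sspp->idx - SSPP_VIG0,
*		.is_rt = true,
*	};
*
*	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
*/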
/**
* dpu_vbif_clear_errors - clear any vbif errors
* @dpu_kms: DPU handler
*/
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
/**
* dpu_vbif_init_memtypes - initialize xin memory types for vbif
* @dpu_kms: DPU handler
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
#ifdef CONFIG_DEBUG_FS
int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
#else
static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
struct dentry *debugfs_root)
{
return 0;
}
static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
{
}
#endif
#endif /* __DPU_VBIF_H__ */

File diff suppressed because it is too large

drivers/gpu/drm/msm/msm_drv.c

@@ -1,4 +1,5 @@
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -149,7 +152,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -197,7 +200,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-queue_work(priv->wq, &vbl_ctrl->work);
+kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+&vbl_ctrl->work);
return 0;
}
@@ -211,17 +215,33 @@ static int msm_drm_uninit(struct device *dev)
struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
-cancel_work_sync(&vbl_ctrl->work);
+kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
/* clean up display commit/event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->disp_thread[i].thread) {
kthread_flush_worker(&priv->disp_thread[i].worker);
kthread_stop(priv->disp_thread[i].thread);
priv->disp_thread[i].thread = NULL;
}
if (priv->event_thread[i].thread) {
kthread_flush_worker(&priv->event_thread[i].worker);
kthread_stop(priv->event_thread[i].thread);
priv->event_thread[i].thread = NULL;
}
}
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -269,6 +289,7 @@ static int msm_drm_uninit(struct device *dev)
#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU 3
static int get_mdp_ver(struct platform_device *pdev)
{
@@ -360,7 +381,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct msm_mdss *mdss;
-int ret;
+int ret, i;
+struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -379,7 +401,17 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
-ret = mdp5_mdss_init(ddev);
+switch (get_mdp_ver(pdev)) {
+case KMS_MDP5:
+ret = mdp5_mdss_init(ddev);
+break;
+case KMS_DPU:
+ret = dpu_mdss_init(ddev);
+break;
+default:
+ret = 0;
+break;
+}
if (ret)
goto err_free_priv;
@@ -389,7 +421,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
@@ -413,6 +445,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
case KMS_DPU:
kms = dpu_kms_init(ddev);
priv->kms = kms;
break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -444,6 +480,79 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
/*
* This priority was found during empirical testing to give the display
* threads appropriate realtime scheduling while still interacting well
* with other realtime and normal priority tasks.
*/
param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
/* initialize display thread */
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->disp_thread[i].worker);
priv->disp_thread[i].dev = ddev;
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
if (IS_ERR(priv->disp_thread[i].thread)) {
dev_err(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
} else {
/* elevate priority only once the thread is known to be valid */
ret = sched_setscheduler(priv->disp_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
pr_warn("display thread priority update failed: %d\n",
ret);
}
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
priv->event_thread[i].dev = ddev;
priv->event_thread[i].thread =
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
/*
* The event thread should run at the same priority as disp_thread
* because it handles frame_done events. A lower priority event thread
* combined with a higher priority disp_thread can push the
* frame_pending counter beyond 2, which can lead to commit failures
* at the crtc commit level.
*/
if (IS_ERR(priv->event_thread[i].thread)) {
dev_err(dev, "failed to create crtc_event kthread\n");
priv->event_thread[i].thread = NULL;
} else {
ret = sched_setscheduler(priv->event_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
pr_warn("display event thread priority update failed: %d\n",
ret);
}
if ((!priv->disp_thread[i].thread) ||
!priv->event_thread[i].thread) {
/* clean up previously created threads if any */
for ( ; i >= 0; i--) {
if (priv->disp_thread[i].thread) {
kthread_stop(
priv->disp_thread[i].thread);
priv->disp_thread[i].thread = NULL;
}
if (priv->event_thread[i].thread) {
kthread_stop(
priv->event_thread[i].thread);
priv->event_thread[i].thread = NULL;
}
}
goto err_msm_uninit;
}
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
@@ -1060,12 +1169,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
-* MDP5 based devices don't have a flat hierarchy. There is a top level
-* parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
-* children devices, find the MDP5 node, and then add the interfaces
-* to our components list.
+* MDP5/DPU based devices don't have a flat hierarchy. There is a top
+* level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+* Populate the children devices, find the MDP5/DPU node, and then add
+* the interfaces to our components list.
*/
-if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1177,6 +1287,7 @@ static int msm_pdev_remove(struct platform_device *pdev)
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1198,6 +1309,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1214,6 +1326,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
msm_dpu_unregister();
}
module_init(msm_drm_register);

drivers/gpu/drm/msm/msm_drv.h

@@ -1,4 +1,5 @@
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -55,7 +57,7 @@ struct msm_gem_address_space;
struct msm_gem_vma;
#define MAX_CRTCS 8
-#define MAX_PLANES 16
+#define MAX_PLANES 20
#define MAX_ENCODERS 8
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
@@ -74,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
-struct work_struct work;
+struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
/**
* enum msm_display_caps - features/capabilities supported by displays
* @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
* @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
* @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
* @MSM_DISPLAY_CAP_EDID: EDID supported
*/
enum msm_display_caps {
MSM_DISPLAY_CAP_VID_MODE = BIT(0),
MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
MSM_DISPLAY_CAP_EDID = BIT(3),
};
/**
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
* @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
MSM_ENC_VBLANK,
};
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
* @num_enc: number of compression encoder blocks used
* @num_intf: number of interfaces the panel is mounted on
*/
struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
};
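/*
* Illustrative sketch (not part of this file): a panel driven by two layer
* mixers merged onto a single DSI interface, with no compression (DSC)
* encoders in use, would be described as:
*
*	struct msm_display_topology topology = {
*		.num_lm = 2,
*		.num_enc = 0,
*		.num_intf = 1,
*	};
*/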
/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_CONNECTOR_ display type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
*/
struct msm_display_info {
int intf_type;
uint32_t capabilities;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
};
/**
* struct msm_drm_thread - per-crtc commit/event worker thread
* @dev: handle to the drm device
* @thread: task running the kthread worker loop
* @crtc_id: DRM id of the crtc this thread serves
* @worker: kthread worker that commit/event work is queued to
*/
struct msm_drm_thread {
struct drm_device *dev;
struct task_struct *thread;
unsigned int crtc_id;
struct kthread_worker worker;
};
struct msm_drm_private {
@@ -90,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
-/* top level MDSS wrapper device (for MDP5 only) */
+/* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -128,6 +195,9 @@ struct msm_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
struct drm_encoder *encoders[MAX_ENCODERS];
@@ -180,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -291,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
void __init msm_dpu_register(void);
void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);

drivers/gpu/drm/msm/msm_kms.h

@@ -1,4 +1,5 @@
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -51,6 +52,11 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
/* do format checking on format modified through fb_cmd2 modifiers */
int (*check_modified_format)(const struct msm_kms *kms,
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -90,6 +96,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);
struct msm_mdss_funcs {
int (*enable)(struct msm_mdss *mdss);
@@ -103,5 +110,6 @@ struct msm_mdss {
};
int mdp5_mdss_init(struct drm_device *dev);
int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */