Merge tag 'drm-next-du-20170803' of git://linuxtv.org/pinchartl/media into drm-next

rcar-du updates; also contains vsp1 updates.

* tag 'drm-next-du-20170803' of git://linuxtv.org/pinchartl/media: (24 commits)
  drm: rcar-du: Use new iterator macros
  drm: rcar-du: Repair vblank for DRM page flips using the VSP
  drm: rcar-du: Fix race condition when disabling planes at CRTC stop
  drm: rcar-du: Wait for flip completion instead of vblank in commit tail
  drm: rcar-du: Use the VBK interrupt for vblank events
  drm: rcar-du: Add HDMI outputs to R8A7796 device description
  drm: rcar-du: Remove an unneeded NULL check
  drm: rcar-du: Setup planes before enabling CRTC to avoid flicker
  drm: rcar-du: Configure DPAD0 routing through last group on Gen3
  drm: rcar-du: Restrict DPLL duty cycle workaround to H3 ES1.x
  drm: rcar-du: Support multiple sources from the same VSP
  drm: rcar-du: Fix comments to comply with the kernel coding style
  drm: rcar-du: Use of_graph_get_remote_endpoint()
  v4l: vsp1: Add support for header display lists in continuous mode
  v4l: vsp1: Add support for multiple DRM pipelines
  v4l: vsp1: Add support for multiple LIF instances
  v4l: vsp1: Add support for new VSP2-BS, VSP2-DL and VSP2-D instances
  v4l: vsp1: Add support for the BRS entity
  v4l: vsp1: Add pipe index argument to the VSP-DU API
  v4l: vsp1: Don't create links for DRM pipeline
  ...
Committed by Dave Airlie, 2017-08-04 11:41:24 +10:00
commit 9f589b20b4
29 changed files with 957 additions and 549 deletions

View File

@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/sys_soc.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@ -129,10 +130,8 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
for (fdpll = 1; fdpll < 32; fdpll++) {
unsigned long output;
/* 1/2 (FRQSEL=1) for duty rate 50% */
output = input * (n + 1) / (m + 1)
/ (fdpll + 1) / 2;
/ (fdpll + 1);
if (output >= 400000000)
continue;
@ -158,6 +157,11 @@ done:
best_diff);
}
static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
};
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
@ -168,7 +172,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
u32 escr;
u32 div;
/* Compute the clock divisor and select the internal or external dot
/*
* Compute the clock divisor and select the internal or external dot
* clock based on the requested frequency.
*/
clk = clk_get_rate(rcrtc->clock);
@ -185,7 +190,20 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
extclk = clk_get_rate(rcrtc->extclock);
if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock);
unsigned long target = mode_clock;
/*
* The H3 ES1.x exhibits dot clock duty cycle stability
* issues. We can work around them by configuring the
* DPLL to twice the desired frequency, coupled with a
* /2 post-divider. This isn't needed on other SoCs and
* breaks HDMI output on M3-W for a currently unknown
* reason, so restrict the workaround to H3 ES1.x.
*/
if (soc_device_match(rcar_du_r8a7795_es1))
target *= 2;
rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
extclk = dpll.output;
}
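The comment above describes the workaround only in prose. As a worked example (illustrative, not part of this commit's diff): for a 148.5 MHz mode clock on H3 ES1.x the DPLL is asked for 297 MHz, and a /2 post-divider on the external clock path brings the dot clock back to 148.5 MHz with a stable duty cycle; other SoCs program the DPLL for 148.5 MHz directly. A minimal sketch of the target selection, with is_h3_es1 as a hypothetical flag:

	#include <linux/types.h>

	/* Illustrative only: pick the DPLL target frequency per the workaround above. */
	static unsigned long example_dpll_target(unsigned long mode_clock, bool is_h3_es1)
	{
		/* H3 ES1.x: run the DPLL at twice the dot clock, divide by two downstream. */
		return is_h3_es1 ? mode_clock * 2 : mode_clock;
	}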
@ -197,8 +215,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
if (abs((long)extrate - (long)mode_clock) <
abs((long)rate - (long)mode_clock)) {
dev_dbg(rcrtc->group->dev->dev,
"crtc%u: using external clock\n", rcrtc->index);
if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
@ -215,12 +231,14 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, DPLLCR,
dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | 1;
} else {
escr = ESCR_DCLKSEL_DCLKIN | extdiv;
}
escr = ESCR_DCLKSEL_DCLKIN | extdiv;
}
dev_dbg(rcrtc->group->dev->dev,
"mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n",
mode_clock, extrate, rate, escr);
}
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
@ -261,12 +279,14 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->group->dev;
/* Store the route from the CRTC output to the DU output. The DU will be
/*
* Store the route from the CRTC output to the DU output. The DU will be
* configured when starting the CRTC.
*/
rcrtc->outputs |= BIT(output);
/* Store RGB routing to DPAD0, the hardware will be configured when
/*
* Store RGB routing to DPAD0, the hardware will be configured when
* starting the CRTC.
*/
if (output == RCAR_DU_OUTPUT_DPAD0)
@ -342,7 +362,8 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
}
}
/* Update the planes to display timing and dot clock generator
/*
* Update the planes to display timing and dot clock generator
* associations.
*
* Updating the DPTSR register requires restarting the CRTC group,
@ -431,14 +452,8 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
* Start/Stop and Suspend/Resume
*/
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
bool interlaced;
if (rcrtc->started)
return;
/* Set display off and background to black */
rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
@ -450,7 +465,20 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
/* Start with all planes disabled. */
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Select master sync mode. This enables display operation in master
/* Enable the VSP compositor. */
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
/* Turn vertical blanking interrupt reporting on. */
drm_crtc_vblank_on(&rcrtc->crtc);
}
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
bool interlaced;
/*
* Select master sync mode. This enables display operation in master
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
* actively driven).
*/
@ -460,38 +488,56 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
DSYSR_TVM_MASTER);
rcar_du_group_start_stop(rcrtc->group, true);
}
/* Enable the VSP compositor. */
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_device *rcdu = rcrtc->group->dev;
struct drm_crtc *crtc = &rcrtc->crtc;
u32 status;
/* Turn vertical blanking interrupt reporting back on. */
drm_crtc_vblank_on(crtc);
/* Make sure vblank interrupts are enabled. */
drm_crtc_vblank_get(crtc);
rcrtc->started = true;
/*
* Disable planes and calculate how many vertical blanking interrupts we
* have to wait for. If a vertical blanking interrupt has been triggered
* but not processed yet, we don't know whether it occurred before or
* after the planes got disabled. We thus have to wait for two vblank
* interrupts in that case.
*/
spin_lock_irq(&rcrtc->vblank_lock);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
status = rcar_du_crtc_read(rcrtc, DSSR);
rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
spin_unlock_irq(&rcrtc->vblank_lock);
if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
msecs_to_jiffies(100)))
dev_warn(rcdu->dev, "vertical blanking timeout\n");
drm_crtc_vblank_put(crtc);
}
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
if (!rcrtc->started)
return;
/* Disable all planes and wait for the change to take effect. This is
* required as the DSnPR registers are updated on vblank, and no vblank
* will occur once the CRTC is stopped. Disabling planes when starting
* the CRTC thus wouldn't be enough as it would start scanning out
* immediately from old frame buffers until the next vblank.
/*
* Disable all planes and wait for the change to take effect. This is
* required as the plane enable registers are updated on vblank, and no
* vblank will occur once the CRTC is stopped. Disabling planes when
* starting the CRTC thus wouldn't be enough as it would start scanning
* out immediately from old frame buffers until the next vblank.
*
* This increases the CRTC stop delay, especially when multiple CRTCs
* are stopped in one operation as we now wait for one vblank per CRTC.
* Whether this can be improved needs to be researched.
*/
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
drm_crtc_wait_one_vblank(crtc);
rcar_du_crtc_disable_planes(rcrtc);
/* Disable vertical blanking interrupt reporting. We first need to wait
/*
* Disable vertical blanking interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
* expects page flips to eventually complete.
*/
@ -502,14 +548,13 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
/* Select switch sync mode. This stops display operation and configures
/*
* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
rcar_du_group_start_stop(rcrtc->group, false);
rcrtc->started = false;
}
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
@ -529,12 +574,10 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
return;
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
rcar_du_crtc_setup(rcrtc);
/* Commit the planes state. */
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
rcar_du_vsp_enable(rcrtc);
} else {
if (!rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
for (i = 0; i < rcrtc->group->num_planes; ++i) {
struct rcar_du_plane *plane = &rcrtc->group->planes[i];
@ -546,6 +589,7 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
}
rcar_du_crtc_update_planes(rcrtc);
rcar_du_crtc_start(rcrtc);
}
/* -----------------------------------------------------------------------------
@ -557,7 +601,16 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_get(rcrtc);
/*
* If the CRTC has already been setup by the .atomic_begin() handler we
* can skip the setup stage.
*/
if (!rcrtc->initialized) {
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_setup(rcrtc);
rcrtc->initialized = true;
}
rcar_du_crtc_start(rcrtc);
}
@ -576,6 +629,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
rcrtc->initialized = false;
rcrtc->outputs = 0;
}
@ -584,6 +638,19 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
WARN_ON(!crtc->state->enable);
/*
* If a mode set is in progress we can be called with the CRTC disabled.
* We then need to first setup the CRTC in order to configure planes.
* The .atomic_enable() handler will notice and skip the CRTC setup.
*/
if (!rcrtc->initialized) {
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_setup(rcrtc);
rcrtc->initialized = true;
}
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@ -623,6 +690,7 @@ static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
rcrtc->vblank_enable = true;
return 0;
}
@ -632,6 +700,7 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
rcrtc->vblank_enable = false;
}
static const struct drm_crtc_funcs crtc_funcs = {
@ -656,14 +725,30 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
u32 status;
spin_lock(&rcrtc->vblank_lock);
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_FRM) {
drm_crtc_handle_vblank(&rcrtc->crtc);
if (status & DSSR_VBK) {
/*
* Wake up the vblank wait if the counter reaches 0. This must
* be protected by the vblank_lock to avoid races in
* rcar_du_crtc_disable_planes().
*/
if (rcrtc->vblank_count) {
if (--rcrtc->vblank_count == 0)
wake_up(&rcrtc->vblank_wait);
}
}
if (rcdu->info->gen < 3)
spin_unlock(&rcrtc->vblank_lock);
if (status & DSSR_VBK) {
if (rcdu->info->gen < 3) {
drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
}
ret = IRQ_HANDLED;
}
@ -717,13 +802,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
}
init_waitqueue_head(&rcrtc->flip_wait);
init_waitqueue_head(&rcrtc->vblank_wait);
spin_lock_init(&rcrtc->vblank_lock);
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
primary = &rcrtc->vsp->planes[0].plane;
primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
else
primary = &rgrp->planes[index % 2].plane;

View File

@ -15,6 +15,7 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <drm/drmP.h>
@ -30,11 +31,17 @@ struct rcar_du_vsp;
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
* @index: CRTC software and hardware index
* @started: whether the CRTC has been started and is running
* @initialized: whether the CRTC has been initialized and clocks enabled
* @vblank_enable: whether vblank events are enabled on this CRTC
* @event: event to post when the pending page flip completes
* @flip_wait: wait queue used to signal page flip completion
* @vblank_lock: protects vblank_wait and vblank_count
* @vblank_wait: wait queue used to signal vertical blanking
* @vblank_count: number of vertical blanking interrupts to wait for
* @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
* @group: CRTC group this CRTC belongs to
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
@ -43,15 +50,21 @@ struct rcar_du_crtc {
struct clk *extclock;
unsigned int mmio_offset;
unsigned int index;
bool started;
bool initialized;
bool vblank_enable;
struct drm_pending_vblank_event *event;
wait_queue_head_t flip_wait;
spinlock_t vblank_lock;
wait_queue_head_t vblank_wait;
unsigned int vblank_count;
unsigned int outputs;
struct rcar_du_group *group;
struct rcar_du_vsp *vsp;
unsigned int vsp_pipe;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)

View File

@ -39,7 +39,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.features = 0,
.num_crtcs = 2,
.routes = {
/* R8A7779 has two RGB outputs and one (currently unsupported)
/*
* R8A7779 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@ -61,7 +62,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
/* R8A7790 has one RGB output, two LVDS outputs and one
/*
* R8A7790 has one RGB output, two LVDS outputs and one
* (currently unsupported) TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@ -87,7 +89,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
/* R8A779[13] has one RGB output, one LVDS output and one
/*
* R8A779[13] has one RGB output, one LVDS output and one
* (currently unsupported) TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@ -127,7 +130,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
/* R8A7794 has two RGB outputs and one (currently unsupported)
/*
* R8A7794 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@ -149,7 +153,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 4,
.routes = {
/* R8A7795 has one RGB output, two HDMI outputs and one
/*
* R8A7795 has one RGB output, two HDMI outputs and one
* LVDS output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@ -180,19 +185,25 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 3,
.routes = {
/* R8A7796 has one RGB output, one LVDS output and one
* (currently unsupported) HDMI output.
/*
* R8A7796 has one RGB output, one LVDS output and one HDMI
* output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
[RCAR_DU_OUTPUT_HDMI0] = {
.possible_crtcs = BIT(1),
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
.dpll_ch = BIT(1),
};
static const struct of_device_id rcar_du_of_table[] = {
@ -341,7 +352,8 @@ static int rcar_du_probe(struct platform_device *pdev)
ddev->irq_enabled = 1;
/* Register the DRM device with the core and the connectors with
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
ret = drm_dev_register(ddev, 0);

View File

@ -64,7 +64,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
if (rcdu->info->gen < 3) {
defr8 |= DEFR8_DEFE8;
/* On Gen2 the DEFR8 register for the first group also controls
/*
* On Gen2 the DEFR8 register for the first group also controls
* RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for
* DU instances that support it.
*/
@ -75,7 +76,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
defr8 |= DEFR8_VSCS;
}
} else {
/* On Gen3 VSPD routing can't be configured, but DPAD routing
/*
* On Gen3 VSPD routing can't be configured, but DPAD routing
* needs to be set despite having a single option available.
*/
u32 crtc = ffs(possible_crtcs) - 1;
@ -124,7 +126,8 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcdu->info->gen >= 3)
rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10);
/* Use DS1PR and DS2PR to configure planes priorities and connects the
/*
* Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
*/
rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
@ -177,7 +180,8 @@ static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
/* Many of the configuration bits are only updated when the display
/*
* Many of the configuration bits are only updated when the display
* reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
* of those bits could be pre-configured, but others (especially the
* bits related to plane assignment to display timing controllers) need
@ -208,23 +212,32 @@ void rcar_du_group_restart(struct rcar_du_group *rgrp)
int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
{
struct rcar_du_group *rgrp;
struct rcar_du_crtc *crtc;
unsigned int index;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
return 0;
/* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
* configured in the DEFR8 register of the first group. As this function
* can be called with the DU0 and DU1 CRTCs disabled, we need to enable
* the first group clock before accessing the register.
/*
* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
* configured in the DEFR8 register of the first group on Gen2 and the
* last group on Gen3. As this function can be called with the DU
* channels of the corresponding CRTCs disabled, we need to enable the
* group clock before accessing the register.
*/
ret = clk_prepare_enable(rcdu->crtcs[0].clock);
index = rcdu->info->gen < 3 ? 0 : DIV_ROUND_UP(rcdu->num_crtcs, 2) - 1;
rgrp = &rcdu->groups[index];
crtc = &rcdu->crtcs[index * 2];
ret = clk_prepare_enable(crtc->clock);
if (ret < 0)
return ret;
rcar_du_group_setup_defr8(&rcdu->groups[0]);
rcar_du_group_setup_defr8(rgrp);
clk_disable_unprepare(rcdu->crtcs[0].clock);
clk_disable_unprepare(crtc->clock);
return 0;
}
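The group selection above can be restated as a small sketch (illustrative only, not part of this commit's diff); it mirrors the index computation in rcar_du_set_dpad0_vsp1_routing():

	#include <linux/kernel.h>

	/* Illustrative only: which group's DEFR8 carries the DPAD0/VSP routing bits. */
	static unsigned int example_defr8_group(unsigned int gen, unsigned int num_crtcs)
	{
		/* Gen2: first group. Gen3: last group, with two CRTCs per group. */
		return gen < 3 ? 0 : DIV_ROUND_UP(num_crtcs, 2) - 1;
	}

For instance, a Gen3 SoC with four CRTCs (H3) and one with three CRTCs (M3-W) both resolve to group 1, while Gen2 SoCs resolve to group 0, which matches the CRTC clock the function now enables before touching DEFR8.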
@ -236,7 +249,8 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
/* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
/*
* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
* CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
* by default.
*/

View File

@ -96,7 +96,8 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
},
/* The following formats are not supported on Gen2 and thus have no
/*
* The following formats are not supported on Gen2 and thus have no
* associated .pnmr or .edf settings.
*/
{
@ -153,7 +154,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
/* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
/*
* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
* but the R8A7790 DU seems to require a 128 bytes pitch alignment.
*/
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
@ -255,12 +257,12 @@ static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
@ -309,7 +311,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
return -ENODEV;
}
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
entity_ep_node = of_graph_get_remote_endpoint(ep->local_node);
for_each_endpoint_of_node(entity, ep_node) {
if (ep_node == entity_ep_node)
@ -419,7 +421,8 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
if (rcdu->props.alpha == NULL)
return -ENOMEM;
/* The color key is expressed as an RGB888 triplet stored in a 32-bit
/*
* The color key is expressed as an RGB888 triplet stored in a 32-bit
* integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
* or enable source color keying (1).
*/
@ -432,6 +435,81 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
return 0;
}
static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
{
const struct device_node *np = rcdu->dev->of_node;
struct of_phandle_args args;
struct {
struct device_node *np;
unsigned int crtcs_mask;
} vsps[RCAR_DU_MAX_VSPS] = { { 0, }, };
unsigned int vsps_count = 0;
unsigned int cells;
unsigned int i;
int ret;
/*
* First parse the DT vsps property to populate the list of VSPs. Each
* entry contains a pointer to the VSP DT node and a bitmask of the
* connected DU CRTCs.
*/
cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
if (cells > 1)
return -EINVAL;
for (i = 0; i < rcdu->num_crtcs; ++i) {
unsigned int j;
ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
&args);
if (ret < 0)
goto error;
/*
* Add the VSP to the list or update the corresponding existing
* entry if the VSP has already been added.
*/
for (j = 0; j < vsps_count; ++j) {
if (vsps[j].np == args.np)
break;
}
if (j < vsps_count)
of_node_put(args.np);
else
vsps[vsps_count++].np = args.np;
vsps[j].crtcs_mask |= BIT(i);
/* Store the VSP pointer and pipe index in the CRTC. */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
/*
* Then initialize all the VSPs from the node pointers and CRTCs bitmask
* computed previously.
*/
for (i = 0; i < vsps_count; ++i) {
struct rcar_du_vsp *vsp = &rcdu->vsps[i];
vsp->index = i;
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
if (ret < 0)
goto error;
}
return 0;
error:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
return ret;
}
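For context, the "vsps" property parsed above lists one entry per CRTC, each a VSP phandle optionally followed by a single cell giving the VSP pipe (LIF) index feeding that CRTC; an entry such as vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1> (hypothetical node names) would share vspd0 between two CRTCs through its two pipes. A minimal parsing sketch for one CRTC, illustrative only and with error handling trimmed, mirroring the loop above:

	#include <linux/of.h>

	/* Illustrative only: resolve the VSP node and pipe index for one CRTC. */
	static int example_vsp_for_crtc(const struct device_node *np,
					unsigned int cells, unsigned int crtc,
					struct device_node **vsp_np)
	{
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, crtc, &args);
		if (ret < 0)
			return ret;

		*vsp_np = args.np;			/* caller owns the reference */
		return cells >= 1 ? args.args[0] : 0;	/* VSP pipe index, 0 if absent */
	}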
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
@ -461,7 +539,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
/* Initialize vertical blanking interrupts handling. Start with vblank
/*
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
@ -481,7 +560,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
rgrp->index = i;
rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);
/* If we have more than one CRTCs in this group pre-associate
/*
* If we have more than one CRTCs in this group pre-associate
* the low-order planes with CRTC 0 and the high-order planes
* with CRTC 1 to minimize flicker occurring when the
* association is changed.
@ -499,17 +579,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the compositors. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
for (i = 0; i < rcdu->num_crtcs; ++i) {
struct rcar_du_vsp *vsp = &rcdu->vsps[i];
vsp->index = i;
vsp->dev = rcdu;
rcdu->crtcs[i].vsp = vsp;
ret = rcar_du_vsp_init(vsp);
if (ret < 0)
return ret;
}
ret = rcar_du_vsps_init(rcdu);
if (ret < 0)
return ret;
}
/* Create the CRTCs. */
@ -537,7 +609,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
num_encoders = ret;
/* Set the possible CRTCs and possible clones. There's always at least
/*
* Set the possible CRTCs and possible clones. There's always at least
* one way for all encoders to clone each other, set all bits in the
* possible clones field.
*/

View File

@ -59,7 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
/* Select the input, hardcode mode 0, enable LVDS operation and turn
/*
* Select the input, hardcode mode 0, enable LVDS operation and turn
* bias circuitry on.
*/
lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
@ -73,7 +74,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
LVDCR1_CLKSTBY_GEN2);
/* Turn the PLL on, wait for the startup delay, and turn the output
/*
* Turn the PLL on, wait for the startup delay, and turn the output
* on.
*/
lvdcr0 |= LVDCR0_PLLON;
@ -140,7 +142,8 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
if (ret < 0)
return ret;
/* Hardcode the channels and control signals routing for now.
/*
* Hardcode the channels and control signals routing for now.
*
* HSYNC -> CTRL0
* VSYNC -> CTRL1
@ -202,7 +205,8 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
{
struct rcar_du_device *rcdu = lvds->dev;
/* The internal LVDS encoder has a restricted clock frequency operating
/*
* The internal LVDS encoder has a restricted clock frequency operating
* range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp
* the clock accordingly.
*/

View File

@ -50,23 +50,21 @@
* automatically when the core swaps the old and new states.
*/
static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
struct rcar_du_plane_state *new_state)
static bool rcar_du_plane_needs_realloc(
const struct rcar_du_plane_state *old_state,
const struct rcar_du_plane_state *new_state)
{
struct rcar_du_plane_state *cur_state;
cur_state = to_rcar_plane_state(plane->plane.state);
/* Lowering the number of planes doesn't strictly require reallocation
/*
* Lowering the number of planes doesn't strictly require reallocation
* as the extra hardware plane will be freed when committing, but doing
* so could lead to more fragmentation.
*/
if (!cur_state->format ||
cur_state->format->planes != new_state->format->planes)
if (!old_state->format ||
old_state->format->planes != new_state->format->planes)
return true;
/* Reallocate hardware planes if the source has changed. */
if (cur_state->source != new_state->source)
if (old_state->source != new_state->source)
return true;
return false;
@ -141,37 +139,43 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
unsigned int groups = 0;
unsigned int i;
struct drm_plane *drm_plane;
struct drm_plane_state *drm_plane_state;
struct drm_plane_state *old_drm_plane_state;
struct drm_plane_state *new_drm_plane_state;
/* Check if hardware planes need to be reallocated. */
for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
new_drm_plane_state, i) {
struct rcar_du_plane_state *old_plane_state;
struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int index;
plane = to_rcar_plane(drm_plane);
plane_state = to_rcar_plane_state(drm_plane_state);
old_plane_state = to_rcar_plane_state(old_drm_plane_state);
new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
/* If the plane is being disabled we don't need to go through
/*
* If the plane is being disabled we don't need to go through
* the full reallocation procedure. Just mark the hardware
* plane(s) as freed.
*/
if (!plane_state->format) {
if (!new_plane_state->format) {
dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
__func__);
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
plane_state->hwindex = -1;
new_plane_state->hwindex = -1;
continue;
}
/* If the plane needs to be reallocated mark it as such, and
/*
* If the plane needs to be reallocated mark it as such, and
* mark the hardware plane(s) as free.
*/
if (rcar_du_plane_needs_realloc(plane, plane_state)) {
if (rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) {
dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
__func__);
groups |= 1 << plane->group->index;
@ -179,14 +183,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
plane_state->hwindex = -1;
new_plane_state->hwindex = -1;
}
}
if (!needs_realloc)
return 0;
/* Grab all plane states for the groups that need reallocation to ensure
/*
* Grab all plane states for the groups that need reallocation to ensure
* locking and avoid racy updates. This serializes the update operation,
* but there's not much we can do about it as that's the hardware
* design.
@ -204,14 +209,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
for (i = 0; i < group->num_planes; ++i) {
struct rcar_du_plane *plane = &group->planes[i];
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane_state *new_plane_state;
struct drm_plane_state *s;
s = drm_atomic_get_plane_state(state, &plane->plane);
if (IS_ERR(s))
return PTR_ERR(s);
/* If the plane has been freed in the above loop its
/*
* If the plane has been freed in the above loop its
* hardware planes must not be added to the used planes
* bitmask. However, the current state doesn't reflect
* the free state yet, as we've modified the new state
@ -226,16 +232,16 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
continue;
}
plane_state = to_rcar_plane_state(plane->plane.state);
used_planes |= rcar_du_plane_hwmask(plane_state);
new_plane_state = to_rcar_plane_state(s);
used_planes |= rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev,
"%s: plane (%u,%tu) uses %u hwplanes (index %d)\n",
__func__, plane->group->index,
plane - plane->group->planes,
plane_state->format ?
plane_state->format->planes : 0,
plane_state->hwindex);
new_plane_state->format ?
new_plane_state->format->planes : 0,
new_plane_state->hwindex);
}
group_free_planes[index] = 0xff & ~used_planes;
@ -246,40 +252,45 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
/* Reallocate hardware planes for each plane that needs it. */
for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
new_drm_plane_state, i) {
struct rcar_du_plane_state *old_plane_state;
struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
plane = to_rcar_plane(drm_plane);
plane_state = to_rcar_plane_state(drm_plane_state);
old_plane_state = to_rcar_plane_state(old_drm_plane_state);
new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
/* Skip planes that are being disabled or don't need to be
/*
* Skip planes that are being disabled or don't need to be
* reallocated.
*/
if (!plane_state->format ||
!rcar_du_plane_needs_realloc(plane, plane_state))
if (!new_plane_state->format ||
!rcar_du_plane_needs_realloc(old_plane_state, new_plane_state))
continue;
/* Try to allocate the plane from the free planes currently
/*
* Try to allocate the plane from the free planes currently
* associated with the target CRTC to avoid restarting the CRTC
* group and thus minimize flicker. If it fails fall back to
* allocating from all free planes.
*/
crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
crtc_planes = to_rcar_crtc(new_plane_state->state.crtc)->index % 2
? plane->group->dptsr_planes
: ~plane->group->dptsr_planes;
free = group_free_planes[plane->group->index];
idx = rcar_du_plane_hwalloc(plane, plane_state,
idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free & crtc_planes);
if (idx < 0)
idx = rcar_du_plane_hwalloc(plane, plane_state,
idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free);
if (idx < 0) {
dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
@ -288,12 +299,12 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
__func__, plane_state->format->planes, idx);
__func__, new_plane_state->format->planes, idx);
plane_state->hwindex = idx;
new_plane_state->hwindex = idx;
group_free_planes[plane->group->index] &=
~rcar_du_plane_hwmask(plane_state);
~rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
__func__, plane->group->index,
@ -351,14 +362,16 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
dma[1] = 0;
}
/* Memory pitch (expressed in pixels). Must be doubled for interlaced
/*
* Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
rcar_du_plane_write(rgrp, index, PnMWR,
(interlaced && state->format->bpp == 32) ?
pitch * 2 : pitch);
/* The Y position is expressed in raster line units and must be doubled
/*
* The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
* doubling the Y position is found in the R8A7779 datasheet, but the
* rule seems to apply there as well.
@ -396,7 +409,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
u32 colorkey;
u32 pnmr;
/* The PnALPHAR register controls alpha-blending in 16bpp formats
/*
* The PnALPHAR register controls alpha-blending in 16bpp formats
* (ARGB1555 and XRGB1555).
*
* For ARGB, set the alpha value to 0, and enable alpha-blending when
@ -413,7 +427,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
pnmr = PnMR_BM_MD | state->format->pnmr;
/* Disable color keying when requested. YUV formats have the
/*
* Disable color keying when requested. YUV formats have the
* PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
* automatically.
*/
@ -457,7 +472,8 @@ static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp,
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
/* Data format
/*
* Data format
*
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
@ -589,7 +605,8 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane,
rcar_du_plane_setup(rplane);
/* Check whether the source has changed from memory to live source or
/*
* Check whether the source has changed from memory to live source or
* from live source to memory. The source has been configured by the
* VSPS bit in the PnDDCR4 register. Although the datasheet states that
* the bit is updated during vertical blanking, it seems that updates
@ -726,7 +743,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
unsigned int i;
int ret;
/* Create one primary plane per CRTC in this group and seven overlay
/*
* Create one primary plane per CRTC in this group and seven overlay
* planes.
*/
rgrp->num_planes = rgrp->num_crtcs + 7;

View File

@ -20,7 +20,8 @@
struct rcar_du_format_info;
struct rcar_du_group;
/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
/*
* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.

View File

@ -19,6 +19,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
@ -30,11 +31,15 @@
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
static void rcar_du_vsp_complete(void *private)
static void rcar_du_vsp_complete(void *private, bool completed)
{
struct rcar_du_crtc *crtc = private;
rcar_du_crtc_finish_page_flip(crtc);
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
if (completed)
rcar_du_crtc_finish_page_flip(crtc);
}
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
@ -73,7 +78,8 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
__rcar_du_plane_setup(crtc->group, &state);
/* Ensure that the plane source configuration takes effect by requesting
/*
* Ensure that the plane source configuration takes effect by requesting
* a restart of the group. See rcar_du_plane_atomic_update() for a more
* detailed explanation.
*
@ -81,22 +87,22 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
*/
crtc->group->need_restart = true;
vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
{
vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL);
}
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
{
vsp1_du_atomic_begin(crtc->vsp->vsp);
vsp1_du_atomic_begin(crtc->vsp->vsp, crtc->vsp_pipe);
}
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
{
vsp1_du_atomic_flush(crtc->vsp->vsp);
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe);
}
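With the pipe index added to the VSP-DU API, one atomic update on a given DRM pipeline follows the begin/update/flush sequence used above. A hedged usage sketch (illustrative only, not part of this commit's diff), where vsp is the VSP struct device, pipe the LIF/pipe index and cfg the plane configuration, with error handling omitted:

	#include <linux/device.h>
	#include <media/vsp1.h>

	/* Illustrative only: push one plane update through a single VSP DRM pipe. */
	static void example_vsp_du_atomic_cycle(struct device *vsp, unsigned int pipe,
						const struct vsp1_du_atomic_config *cfg)
	{
		vsp1_du_atomic_begin(vsp, pipe);
		vsp1_du_atomic_update(vsp, pipe, 0 /* RPF index */, cfg);
		vsp1_du_atomic_flush(vsp, pipe);
	}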
/* Keep the two tables in sync. */
@ -162,6 +168,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
@ -192,7 +199,8 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
}
}
vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg);
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
@ -288,11 +296,13 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc);
if (plane->state->crtc)
rcar_du_vsp_plane_setup(rplane);
else
vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL);
vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
rplane->index, NULL);
}
static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
@ -391,23 +401,17 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
.atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
};
int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
unsigned int crtcs)
{
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
struct device_node *np;
unsigned int num_crtcs = hweight32(crtcs);
unsigned int i;
int ret;
/* Find the VSP device and initialize it. */
np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index);
if (!np) {
dev_err(rcdu->dev, "vsps node not found\n");
return -ENXIO;
}
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev)
return -ENXIO;
@ -417,7 +421,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
if (ret < 0)
return ret;
/* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
/*
* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
* 4 RPFs.
*/
vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
@ -428,15 +433,15 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
return -ENOMEM;
for (i = 0; i < vsp->num_planes; ++i) {
enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY
: DRM_PLANE_TYPE_PRIMARY;
enum drm_plane_type type = i < num_crtcs
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
struct rcar_du_vsp_plane *plane = &vsp->planes[i];
plane->vsp = vsp;
plane->index = i;
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane,
1 << vsp->index,
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
formats_kms,
ARRAY_SIZE(formats_kms), type,

View File

@ -64,13 +64,19 @@ to_rcar_vsp_plane_state(struct drm_plane_state *state)
}
#ifdef CONFIG_DRM_RCAR_VSP
int rcar_du_vsp_init(struct rcar_du_vsp *vsp);
int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
unsigned int crtcs);
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
#else
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; };
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
struct device_node *np,
unsigned int crtcs)
{
return -ENXIO;
}
static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };

View File

@ -45,7 +45,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
{
const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params;
for (; params && params->mpixelclock != ~0UL; ++params) {
for (; params->mpixelclock != ~0UL; ++params) {
if (mpixelclock <= params->mpixelclock)
break;
}

View File

@ -41,11 +41,11 @@ struct vsp1_rwpf;
struct vsp1_sru;
struct vsp1_uds;
#define VSP1_MAX_LIF 2
#define VSP1_MAX_RPF 5
#define VSP1_MAX_UDS 3
#define VSP1_MAX_WPF 4
#define VSP1_HAS_LIF (1 << 0)
#define VSP1_HAS_LUT (1 << 1)
#define VSP1_HAS_SRU (1 << 2)
#define VSP1_HAS_BRU (1 << 3)
@ -54,12 +54,14 @@ struct vsp1_uds;
#define VSP1_HAS_WPF_HFLIP (1 << 6)
#define VSP1_HAS_HGO (1 << 7)
#define VSP1_HAS_HGT (1 << 8)
#define VSP1_HAS_BRS (1 << 9)
struct vsp1_device_info {
u32 version;
const char *model;
unsigned int gen;
unsigned int features;
unsigned int lif_count;
unsigned int rpf_count;
unsigned int uds_count;
unsigned int wpf_count;
@ -76,13 +78,14 @@ struct vsp1_device {
struct rcar_fcp_device *fcp;
struct device *bus_master;
struct vsp1_bru *brs;
struct vsp1_bru *bru;
struct vsp1_clu *clu;
struct vsp1_hgo *hgo;
struct vsp1_hgt *hgt;
struct vsp1_hsit *hsi;
struct vsp1_hsit *hst;
struct vsp1_lif *lif;
struct vsp1_lif *lif[VSP1_MAX_LIF];
struct vsp1_lut *lut;
struct vsp1_rwpf *rpf[VSP1_MAX_RPF];
struct vsp1_sru *sru;

View File

@ -33,7 +33,7 @@
static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl,
u32 reg, u32 data)
{
vsp1_dl_list_write(dl, reg, data);
vsp1_dl_list_write(dl, bru->base + reg, data);
}
/* -----------------------------------------------------------------------------
@ -332,11 +332,14 @@ static void bru_configure(struct vsp1_entity *entity,
/*
* Route BRU input 1 as SRC input to the ROP unit and configure the ROP
* unit with a NOP operation to make BRU input 1 available as the
* Blend/ROP unit B SRC input.
* Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP
* unit.
*/
vsp1_bru_write(bru, dl, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) |
VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
VI6_BRU_ROP_AROP(VI6_ROP_NOP));
if (entity->type == VSP1_ENTITY_BRU)
vsp1_bru_write(bru, dl, VI6_BRU_ROP,
VI6_BRU_ROP_DSTSEL_BRUIN(1) |
VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
VI6_BRU_ROP_AROP(VI6_ROP_NOP));
for (i = 0; i < bru->entity.source_pad; ++i) {
bool premultiplied = false;
@ -366,12 +369,13 @@ static void bru_configure(struct vsp1_entity *entity,
ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
/*
* Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to
* D in that order. The Blend/ROP unit B SRC is hardwired to the
* ROP unit output, the corresponding register bits must be set
* to 0.
* Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D
* in that order. In the BRU the Blend/ROP unit B SRC is
* hardwired to the ROP unit output, the corresponding register
* bits must be set to 0. The BRS has no ROP unit and doesn't
* need any special processing.
*/
if (i != 1)
if (!(entity->type == VSP1_ENTITY_BRU && i == 1))
ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl);
@ -407,20 +411,31 @@ static const struct vsp1_entity_operations bru_entity_ops = {
* Initialization and Cleanup
*/
struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1,
enum vsp1_entity_type type)
{
struct vsp1_bru *bru;
unsigned int num_pads;
const char *name;
int ret;
bru = devm_kzalloc(vsp1->dev, sizeof(*bru), GFP_KERNEL);
if (bru == NULL)
return ERR_PTR(-ENOMEM);
bru->base = type == VSP1_ENTITY_BRU ? VI6_BRU_BASE : VI6_BRS_BASE;
bru->entity.ops = &bru_entity_ops;
bru->entity.type = VSP1_ENTITY_BRU;
bru->entity.type = type;
ret = vsp1_entity_init(vsp1, &bru->entity, "bru",
vsp1->info->num_bru_inputs + 1, &bru_ops,
if (type == VSP1_ENTITY_BRU) {
num_pads = vsp1->info->num_bru_inputs + 1;
name = "bru";
} else {
num_pads = 3;
name = "brs";
}
ret = vsp1_entity_init(vsp1, &bru->entity, name, num_pads, &bru_ops,
MEDIA_ENT_F_PROC_VIDEO_COMPOSER);
if (ret < 0)
return ERR_PTR(ret);
@ -435,7 +450,7 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
bru->entity.subdev.ctrl_handler = &bru->ctrls;
if (bru->ctrls.error) {
dev_err(vsp1->dev, "bru: failed to initialize controls\n");
dev_err(vsp1->dev, "%s: failed to initialize controls\n", name);
ret = bru->ctrls.error;
vsp1_entity_destroy(&bru->entity);
return ERR_PTR(ret);

View File

@ -26,6 +26,7 @@ struct vsp1_rwpf;
struct vsp1_bru {
struct vsp1_entity entity;
unsigned int base;
struct v4l2_ctrl_handler ctrls;
@ -41,6 +42,7 @@ static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev)
return container_of(subdev, struct vsp1_bru, entity.subdev);
}
struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1);
struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1,
enum vsp1_entity_type type);
#endif /* __VSP1_BRU_H__ */

View File

@ -95,6 +95,7 @@ enum vsp1_dl_mode {
* struct vsp1_dl_manager - Display List manager
* @index: index of the related WPF
* @mode: display list operation mode (header or headerless)
* @singleshot: execute the display list in single-shot mode
* @vsp1: the VSP1 device
* @lock: protects the free, active, queued, pending and gc_fragments lists
* @free: array of all free display lists
@ -107,6 +108,7 @@ enum vsp1_dl_mode {
struct vsp1_dl_manager {
unsigned int index;
enum vsp1_dl_mode mode;
bool singleshot;
struct vsp1_device *vsp1;
spinlock_t lock;
@ -437,6 +439,7 @@ int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_dl_header_list *hdr = dl->header->lists;
struct vsp1_dl_body *dlb;
unsigned int num_lists = 0;
@ -461,38 +464,128 @@ static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
dl->header->num_lists = num_lists;
/*
* If this display list's chain is not empty, we are on a list, where
* the next item in the list is the display list entity which should be
* automatically queued by the hardware.
*/
if (!list_empty(&dl->chain) && !is_last) {
/*
* If this display list's chain is not empty, we are on a list,
* and the next item is the display list that we must queue for
* automatic processing by the hardware.
*/
struct vsp1_dl_list *next = list_next_entry(dl, chain);
dl->header->next_header = next->dma;
dl->header->flags = VSP1_DLH_AUTO_START;
} else if (!dlm->singleshot) {
/*
* if the display list manager works in continuous mode, the VSP
* should loop over the display list continuously until
* instructed to do otherwise.
*/
dl->header->next_header = dl->dma;
dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
} else {
/*
* Otherwise, in mem-to-mem mode, we work in single-shot mode
* and the next display list must not be started automatically.
*/
dl->header->flags = VSP1_DLH_INT_ENABLE;
}
}
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
struct vsp1_device *vsp1 = dlm->vsp1;
if (!dlm->queued)
return false;
/*
* Check whether the VSP1 has taken the update. In headerless mode the
* hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
* register, and in header mode by clearing the UPDHDR bit in the CMD
* register.
*/
if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
& VI6_DL_BODY_SIZE_UPD);
else
return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR));
}
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_device *vsp1 = dlm->vsp1;
if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
/*
* In headerless mode, program the hardware directly with the
* display list body address and size and set the UPD bit. The
* bit will be cleared by the hardware when the display list
* processing starts.
*/
vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
(dl->body0.num_entries * sizeof(*dl->header->lists)));
} else {
/*
* In header mode, program the display list header address. If
* the hardware is idle (single-shot mode or first frame in
* continuous mode) it will then be started independently. If
* the hardware is operating, the VI6_DL_HDR_REF_ADDR register
* will be updated with the display list address.
*/
vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}
}
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
struct vsp1_dl_manager *dlm = dl->dlm;
/*
* If a previous display list has been queued to the hardware but not
* processed yet, the VSP can start processing it at any time. In that
* case we can't replace the queued list by the new one, as we could
* race with the hardware. We thus mark the update as pending, it will
* be queued up to the hardware by the frame end interrupt handler.
*/
if (vsp1_dl_list_hw_update_pending(dlm)) {
__vsp1_dl_list_put(dlm->pending);
dlm->pending = dl;
return;
}
/*
* Pass the new display list to the hardware and mark it as queued. It
* will become active when the hardware starts processing it.
*/
vsp1_dl_list_hw_enqueue(dl);
__vsp1_dl_list_put(dlm->queued);
dlm->queued = dl;
}
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
struct vsp1_dl_manager *dlm = dl->dlm;
/*
* When working in single-shot mode, the caller guarantees that the
* hardware is idle at this point. Just commit the head display list
* to hardware. Chained lists will be started automatically.
*/
vsp1_dl_list_hw_enqueue(dl);
dlm->active = dl;
}
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
struct vsp1_dl_manager *dlm = dl->dlm;
struct vsp1_device *vsp1 = dlm->vsp1;
struct vsp1_dl_list *dl_child;
unsigned long flags;
bool update;
spin_lock_irqsave(&dlm->lock, flags);
if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
struct vsp1_dl_list *dl_child;
/*
* In header mode the caller guarantees that the hardware is
* idle at this point.
*/
if (dlm->mode == VSP1_DL_MODE_HEADER) {
/* Fill the header for the head and chained display lists. */
vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
@ -501,43 +594,15 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
vsp1_dl_list_fill_header(dl_child, last);
}
/*
* Commit the head display list to hardware. Chained headers
* will auto-start.
*/
vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
dlm->active = dl;
goto done;
}
/*
* Once the UPD bit has been set the hardware can start processing the
* display list at any time and we can't touch the address and size
* registers. In that case mark the update as pending, it will be
* queued up to the hardware by the frame end interrupt handler.
*/
update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
if (update) {
__vsp1_dl_list_put(dlm->pending);
dlm->pending = dl;
goto done;
}
spin_lock_irqsave(&dlm->lock, flags);
/*
* Program the hardware with the display list body address and size.
* The UPD bit will be cleared by the device when the display list is
* processed.
*/
vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
(dl->body0.num_entries * sizeof(*dl->header->lists)));
if (dlm->singleshot)
vsp1_dl_list_commit_singleshot(dl);
else
vsp1_dl_list_commit_continuous(dl);
__vsp1_dl_list_put(dlm->queued);
dlm->queued = dl;
done:
spin_unlock_irqrestore(&dlm->lock, flags);
}
@ -545,22 +610,6 @@ done:
* Display List Manager
*/
/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
spin_lock(&dlm->lock);
/*
* The display start interrupt signals the end of the display list
* processing by the device. The active display list, if any, won't be
* accessed anymore and can be reused.
*/
__vsp1_dl_list_put(dlm->active);
dlm->active = NULL;
spin_unlock(&dlm->lock);
}
/**
* vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
* @dlm: the display list manager
@ -572,31 +621,28 @@ void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
*/
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
struct vsp1_device *vsp1 = dlm->vsp1;
bool completed = false;
spin_lock(&dlm->lock);
__vsp1_dl_list_put(dlm->active);
dlm->active = NULL;
/*
* Header mode is used for mem-to-mem pipelines only. We don't need to
* perform any operation as there can't be any new display list queued
* in that case.
* The mem-to-mem pipelines work in single-shot mode. No new display
* list can be queued, we don't have to do anything.
*/
if (dlm->mode == VSP1_DL_MODE_HEADER) {
if (dlm->singleshot) {
__vsp1_dl_list_put(dlm->active);
dlm->active = NULL;
completed = true;
goto done;
}
/*
* The UPD bit set indicates that the commit operation raced with the
* interrupt and occurred after the frame end event and UPD clear but
* before interrupt processing. The hardware hasn't taken the update
* into account yet, we'll thus skip one frame and retry.
* If the commit operation raced with the interrupt and occurred after
* the frame end event but before interrupt processing, the hardware
* hasn't taken the update into account yet. We have to skip one frame
* and retry.
*/
if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
if (vsp1_dl_list_hw_update_pending(dlm))
goto done;
/*
@ -604,24 +650,20 @@ bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
* frame end interrupt. The display list thus becomes active.
*/
if (dlm->queued) {
__vsp1_dl_list_put(dlm->active);
dlm->active = dlm->queued;
dlm->queued = NULL;
completed = true;
}
/*
* Now that the UPD bit has been cleared we can queue the next display
* list to the hardware if one has been prepared.
* Now that the VSP has started processing the queued display list, we
* can queue the pending display list to the hardware if one has been
* prepared.
*/
if (dlm->pending) {
struct vsp1_dl_list *dl = dlm->pending;
vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
(dl->body0.num_entries *
sizeof(*dl->header->lists)));
dlm->queued = dl;
vsp1_dl_list_hw_enqueue(dlm->pending);
dlm->queued = dlm->pending;
dlm->pending = NULL;
}
@ -714,6 +756,7 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
dlm->index = index;
dlm->mode = index == 0 && !vsp1->info->uapi
? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
dlm->singleshot = vsp1->info->uapi;
dlm->vsp1 = vsp1;
spin_lock_init(&dlm->lock);
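
A hedged sketch of the vsp1_dl_list_hw_update_pending() helper referenced in the frame end hunk above (the hunk introducing it is not shown in this excerpt). In headerless mode a still-pending hardware update is signalled by the UPD bit of VI6_DL_BODY_SIZE; for header mode the newly added VI6_CMD_UPDHDR bit is assumed to play the same role, so the exact header-mode check is an assumption, not the literal driver code:

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/* Headerless mode: the device clears UPD once it has read the list. */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);

	/* Header mode (assumption): UPDHDR in the per-WPF command register. */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}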

View File

@ -27,7 +27,6 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
unsigned int prealloc);
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm);
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);

View File

@ -32,17 +32,13 @@
* Interrupt Handling
*/
void vsp1_drm_display_start(struct vsp1_device *vsp1)
static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe,
bool completed)
{
vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm);
}
struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
struct vsp1_drm *drm = to_vsp1_drm(pipe);
if (drm->du_complete)
drm->du_complete(drm->du_private);
if (drm_pipe->du_complete)
drm_pipe->du_complete(drm_pipe->du_private, completed);
}
/* -----------------------------------------------------------------------------
@ -63,29 +59,44 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
/**
* vsp1_du_setup_lif - Setup the output part of the VSP pipeline
* @dev: the VSP device
* @pipe_index: the DRM pipeline index
* @cfg: the LIF configuration
*
* Configure the output part of VSP DRM pipeline for the given frame @cfg.width
* and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
* and source pads, and the LIF sink pad.
* and @cfg.height. This sets up formats on the blend unit (BRU or BRS) source
* pad, the WPF sink and source pads, and the LIF sink pad.
*
* As the media bus code on the BRU source pad is conditioned by the
* configuration of the BRU sink 0 pad, we also set up the formats on all BRU
* The @pipe_index argument selects which DRM pipeline to set up. The number of
* available pipelines depends on the VSP instance.
*
* As the media bus code on the blend unit source pad is conditioned by the
* configuration of its sink 0 pad, we also set up the formats on all blend unit
* sinks, even if the configuration will be overwritten later by
* vsp1_du_setup_rpf(). This ensures that the BRU configuration is set to a well
* defined state.
* vsp1_du_setup_rpf(). This ensures that the blend unit configuration is set to
* a well defined state.
*
* Return 0 on success or a negative error code on failure.
*/
int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
const struct vsp1_du_lif_config *cfg)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
struct vsp1_bru *bru = vsp1->bru;
struct vsp1_drm_pipeline *drm_pipe;
struct vsp1_pipeline *pipe;
struct vsp1_bru *bru;
struct v4l2_subdev_format format;
const char *bru_name;
unsigned int i;
int ret;
if (pipe_index >= vsp1->info->lif_count)
return -EINVAL;
drm_pipe = &vsp1->drm->pipe[pipe_index];
pipe = &drm_pipe->pipe;
bru = to_bru(&pipe->bru->subdev);
bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS";
if (!cfg) {
/*
* NULL configuration means the CRTC is being disabled, stop
@ -97,14 +108,25 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
media_pipeline_stop(&pipe->output->entity.subdev.entity);
for (i = 0; i < bru->entity.source_pad; ++i) {
vsp1->drm->inputs[i].enabled = false;
bru->inputs[i].rpf = NULL;
for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) {
struct vsp1_rwpf *rpf = pipe->inputs[i];
if (!rpf)
continue;
/*
* Remove the RPF from the pipe and the list of BRU
* inputs.
*/
WARN_ON(list_empty(&rpf->entity.list_pipe));
list_del_init(&rpf->entity.list_pipe);
pipe->inputs[i] = NULL;
bru->inputs[rpf->bru_input].rpf = NULL;
}
drm_pipe->du_complete = NULL;
pipe->num_inputs = 0;
vsp1->drm->du_complete = NULL;
vsp1_dlm_reset(pipe->output->dlm);
vsp1_device_put(vsp1);
@ -114,8 +136,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
return 0;
}
dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
__func__, cfg->width, cfg->height);
dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u\n",
__func__, pipe_index, cfg->width, cfg->height);
/*
* Configure the format at the BRU sinks and propagate it through the
@ -124,7 +146,7 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
memset(&format, 0, sizeof(format));
format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
for (i = 0; i < bru->entity.source_pad; ++i) {
for (i = 0; i < pipe->bru->source_pad; ++i) {
format.pad = i;
format.format.width = cfg->width;
@ -132,60 +154,60 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
ret = v4l2_subdev_call(&bru->entity.subdev, pad,
ret = v4l2_subdev_call(&pipe->bru->subdev, pad,
set_fmt, NULL, &format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n",
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
__func__, format.format.width, format.format.height,
format.format.code, i);
format.format.code, bru_name, i);
}
format.pad = bru->entity.source_pad;
format.pad = pipe->bru->source_pad;
format.format.width = cfg->width;
format.format.height = cfg->height;
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
ret = v4l2_subdev_call(&bru->entity.subdev, pad, set_fmt, NULL,
ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL,
&format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n",
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
__func__, format.format.width, format.format.height,
format.format.code, i);
format.format.code, bru_name, i);
format.pad = RWPF_PAD_SINK;
ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, set_fmt, NULL,
ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL,
&format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF0 sink\n",
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n",
__func__, format.format.width, format.format.height,
format.format.code);
format.format.code, pipe->output->entity.index);
format.pad = RWPF_PAD_SOURCE;
ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, get_fmt, NULL,
ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL,
&format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF0 source\n",
dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n",
__func__, format.format.width, format.format.height,
format.format.code);
format.format.code, pipe->output->entity.index);
format.pad = LIF_PAD_SINK;
ret = v4l2_subdev_call(&vsp1->lif->entity.subdev, pad, set_fmt, NULL,
ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL,
&format);
if (ret < 0)
return ret;
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF sink\n",
dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n",
__func__, format.format.width, format.format.height,
format.format.code);
format.format.code, pipe_index);
/*
* Verify that the format at the output of the pipeline matches the
@ -213,8 +235,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
* Register a callback to allow us to notify the DRM driver of frame
* completion events.
*/
vsp1->drm->du_complete = cfg->callback;
vsp1->drm->du_private = cfg->callback_data;
drm_pipe->du_complete = cfg->callback;
drm_pipe->du_private = cfg->callback_data;
ret = media_pipeline_start(&pipe->output->entity.subdev.entity,
&pipe->pipe);
@ -224,6 +246,10 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
return ret;
}
/* Disable the display interrupts. */
vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__);
return 0;
@ -233,19 +259,21 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
/**
* vsp1_du_atomic_begin - Prepare for an atomic update
* @dev: the VSP device
* @pipe_index: the DRM pipeline index
*/
void vsp1_du_atomic_begin(struct device *dev)
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
vsp1->drm->num_inputs = pipe->num_inputs;
drm_pipe->enabled = drm_pipe->pipe.num_inputs != 0;
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
/**
* vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline
* @dev: the VSP device
* @pipe_index: the DRM pipeline index
* @rpf_index: index of the RPF to setup (0-based)
* @cfg: the RPF configuration
*
@ -272,10 +300,12 @@ EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
*
* Return 0 on success or a negative error code on failure.
*/
int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
unsigned int rpf_index,
const struct vsp1_du_atomic_config *cfg)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
const struct vsp1_format_info *fmtinfo;
struct vsp1_rwpf *rpf;
@ -288,7 +318,12 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
rpf_index);
vsp1->drm->inputs[rpf_index].enabled = false;
/*
* Remove the RPF from the pipe's inputs. The atomic flush
* handler will disable the input and remove the entity from the
* pipe's entities list.
*/
drm_pipe->pipe.inputs[rpf_index] = NULL;
return 0;
}
@ -324,13 +359,15 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
vsp1->drm->inputs[rpf_index].crop = cfg->src;
vsp1->drm->inputs[rpf_index].compose = cfg->dst;
vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
vsp1->drm->inputs[rpf_index].enabled = true;
drm_pipe->pipe.inputs[rpf_index] = rpf;
return 0;
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
struct vsp1_pipeline *pipe,
struct vsp1_rwpf *rpf, unsigned int bru_input)
{
struct v4l2_subdev_selection sel;
@ -404,7 +441,7 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
/* BRU sink, propagate the format from the RPF source. */
format.pad = bru_input;
ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL,
ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_fmt, NULL,
&format);
if (ret < 0)
return ret;
@ -417,8 +454,8 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
sel.target = V4L2_SEL_TGT_COMPOSE;
sel.r = vsp1->drm->inputs[rpf->entity.index].compose;
ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection,
NULL, &sel);
ret = v4l2_subdev_call(&pipe->bru->subdev, pad, set_selection, NULL,
&sel);
if (ret < 0)
return ret;
@ -438,18 +475,25 @@ static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf)
/**
* vsp1_du_atomic_flush - Commit an atomic update
* @dev: the VSP device
* @pipe_index: the DRM pipeline index
*/
void vsp1_du_atomic_flush(struct device *dev)
void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
struct vsp1_pipeline *pipe = &drm_pipe->pipe;
struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
struct vsp1_bru *bru = to_bru(&pipe->bru->subdev);
struct vsp1_entity *entity;
struct vsp1_entity *next;
struct vsp1_dl_list *dl;
const char *bru_name;
unsigned long flags;
unsigned int i;
int ret;
bru_name = pipe->bru->type == VSP1_ENTITY_BRU ? "BRU" : "BRS";
/* Prepare the display list. */
dl = vsp1_dl_list_get(pipe->output->dlm);
@ -460,12 +504,8 @@ void vsp1_du_atomic_flush(struct device *dev)
struct vsp1_rwpf *rpf = vsp1->rpf[i];
unsigned int j;
if (!vsp1->drm->inputs[i].enabled) {
pipe->inputs[i] = NULL;
if (!pipe->inputs[i])
continue;
}
pipe->inputs[i] = rpf;
/* Insert the RPF in the sorted RPFs array. */
for (j = pipe->num_inputs++; j > 0; --j) {
@ -478,22 +518,26 @@ void vsp1_du_atomic_flush(struct device *dev)
}
/* Setup the RPF input pipeline for every enabled input. */
for (i = 0; i < vsp1->info->num_bru_inputs; ++i) {
for (i = 0; i < pipe->bru->source_pad; ++i) {
struct vsp1_rwpf *rpf = inputs[i];
if (!rpf) {
vsp1->bru->inputs[i].rpf = NULL;
bru->inputs[i].rpf = NULL;
continue;
}
vsp1->bru->inputs[i].rpf = rpf;
if (list_empty(&rpf->entity.list_pipe))
list_add_tail(&rpf->entity.list_pipe, &pipe->entities);
bru->inputs[i].rpf = rpf;
rpf->bru_input = i;
rpf->entity.sink = pipe->bru;
rpf->entity.sink_pad = i;
dev_dbg(vsp1->dev, "%s: connecting RPF.%u to BRU:%u\n",
__func__, rpf->entity.index, i);
dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n",
__func__, rpf->entity.index, bru_name, i);
ret = vsp1_du_setup_rpf_pipe(vsp1, rpf, i);
ret = vsp1_du_setup_rpf_pipe(vsp1, pipe, rpf, i);
if (ret < 0)
dev_err(vsp1->dev,
"%s: failed to setup RPF.%u\n",
@ -501,16 +545,16 @@ void vsp1_du_atomic_flush(struct device *dev)
}
/* Configure all entities in the pipeline. */
list_for_each_entry(entity, &pipe->entities, list_pipe) {
list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) {
/* Disconnect unused RPFs from the pipeline. */
if (entity->type == VSP1_ENTITY_RPF) {
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
if (entity->type == VSP1_ENTITY_RPF &&
!pipe->inputs[entity->index]) {
vsp1_dl_list_write(dl, entity->route->reg,
VI6_DPR_NODE_UNUSED);
if (!pipe->inputs[rpf->entity.index]) {
vsp1_dl_list_write(dl, entity->route->reg,
VI6_DPR_NODE_UNUSED);
continue;
}
list_del_init(&entity->list_pipe);
continue;
}
vsp1_entity_route_setup(entity, pipe, dl);
@ -528,14 +572,11 @@ void vsp1_du_atomic_flush(struct device *dev)
vsp1_dl_list_commit(dl);
/* Start or stop the pipeline if needed. */
if (!vsp1->drm->num_inputs && pipe->num_inputs) {
vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE);
if (!drm_pipe->enabled && pipe->num_inputs) {
spin_lock_irqsave(&pipe->irqlock, flags);
vsp1_pipeline_run(pipe);
spin_unlock_irqrestore(&pipe->irqlock, flags);
} else if (vsp1->drm->num_inputs && !pipe->num_inputs) {
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
} else if (drm_pipe->enabled && !pipe->num_inputs) {
vsp1_pipeline_stop(pipe);
}
}
@ -568,83 +609,48 @@ EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
* Initialization
*/
int vsp1_drm_create_links(struct vsp1_device *vsp1)
{
const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
unsigned int i;
int ret;
/*
* VSPD instances require a BRU to perform composition and a LIF to
* output to the DU.
*/
if (!vsp1->bru || !vsp1->lif)
return -ENXIO;
for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *rpf = vsp1->rpf[i];
ret = media_create_pad_link(&rpf->entity.subdev.entity,
RWPF_PAD_SOURCE,
&vsp1->bru->entity.subdev.entity,
i, flags);
if (ret < 0)
return ret;
rpf->entity.sink = &vsp1->bru->entity.subdev.entity;
rpf->entity.sink_pad = i;
}
ret = media_create_pad_link(&vsp1->bru->entity.subdev.entity,
vsp1->bru->entity.source_pad,
&vsp1->wpf[0]->entity.subdev.entity,
RWPF_PAD_SINK, flags);
if (ret < 0)
return ret;
vsp1->bru->entity.sink = &vsp1->wpf[0]->entity.subdev.entity;
vsp1->bru->entity.sink_pad = RWPF_PAD_SINK;
ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
RWPF_PAD_SOURCE,
&vsp1->lif->entity.subdev.entity,
LIF_PAD_SINK, flags);
if (ret < 0)
return ret;
return 0;
}
int vsp1_drm_init(struct vsp1_device *vsp1)
{
struct vsp1_pipeline *pipe;
unsigned int i;
vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL);
if (!vsp1->drm)
return -ENOMEM;
pipe = &vsp1->drm->pipe;
/* Create one DRM pipeline per LIF. */
for (i = 0; i < vsp1->info->lif_count; ++i) {
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i];
struct vsp1_pipeline *pipe = &drm_pipe->pipe;
vsp1_pipeline_init(pipe);
vsp1_pipeline_init(pipe);
/* The DRM pipeline is static, add entities manually. */
/*
* The DRM pipeline is static, add entities manually. The first
* pipeline uses the BRU and the second pipeline the BRS.
*/
pipe->bru = i == 0 ? &vsp1->bru->entity : &vsp1->brs->entity;
pipe->lif = &vsp1->lif[i]->entity;
pipe->output = vsp1->wpf[i];
pipe->output->pipe = pipe;
pipe->frame_end = vsp1_du_pipeline_frame_end;
pipe->bru->sink = &pipe->output->entity;
pipe->bru->sink_pad = 0;
pipe->output->entity.sink = pipe->lif;
pipe->output->entity.sink_pad = 0;
list_add_tail(&pipe->bru->list_pipe, &pipe->entities);
list_add_tail(&pipe->lif->list_pipe, &pipe->entities);
list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);
}
/* Disable all RPFs initially. */
for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *input = vsp1->rpf[i];
list_add_tail(&input->entity.list_pipe, &pipe->entities);
INIT_LIST_HEAD(&input->entity.list_pipe);
}
list_add_tail(&vsp1->bru->entity.list_pipe, &pipe->entities);
list_add_tail(&vsp1->wpf[0]->entity.list_pipe, &pipe->entities);
list_add_tail(&vsp1->lif->entity.list_pipe, &pipe->entities);
pipe->bru = &vsp1->bru->entity;
pipe->lif = &vsp1->lif->entity;
pipe->output = vsp1->wpf[0];
pipe->output->pipe = pipe;
pipe->frame_end = vsp1_du_pipeline_frame_end;
return 0;
}

View File

@ -18,38 +18,44 @@
#include "vsp1_pipe.h"
/**
* vsp1_drm - State for the API exposed to the DRM driver
* vsp1_drm_pipeline - State for the API exposed to the DRM driver
* @pipe: the VSP1 pipeline used for display
* @num_inputs: number of active pipeline inputs at the beginning of an update
* @inputs: source crop rectangle, destination compose rectangle and z-order
* position for every input
* @enabled: pipeline state at the beginning of an update
* @du_complete: frame completion callback for the DU driver (optional)
* @du_private: data to be passed to the du_complete callback
*/
struct vsp1_drm {
struct vsp1_drm_pipeline {
struct vsp1_pipeline pipe;
unsigned int num_inputs;
bool enabled;
/* Frame synchronisation */
void (*du_complete)(void *, bool);
void *du_private;
};
/**
* vsp1_drm - State for the API exposed to the DRM driver
* @pipe: the VSP1 DRM pipeline used for display
* @inputs: source crop rectangle, destination compose rectangle and z-order
* position for every input (indexed by RPF index)
*/
struct vsp1_drm {
struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF];
struct {
bool enabled;
struct v4l2_rect crop;
struct v4l2_rect compose;
unsigned int zpos;
} inputs[VSP1_MAX_RPF];
/* Frame synchronisation */
void (*du_complete)(void *);
void *du_private;
};
static inline struct vsp1_drm *to_vsp1_drm(struct vsp1_pipeline *pipe)
static inline struct vsp1_drm_pipeline *
to_vsp1_drm_pipeline(struct vsp1_pipeline *pipe)
{
return container_of(pipe, struct vsp1_drm, pipe);
return container_of(pipe, struct vsp1_drm_pipeline, pipe);
}
int vsp1_drm_init(struct vsp1_device *vsp1);
void vsp1_drm_cleanup(struct vsp1_device *vsp1);
int vsp1_drm_create_links(struct vsp1_device *vsp1);
void vsp1_drm_display_start(struct vsp1_device *vsp1);
#endif /* __VSP1_DRM_H__ */

View File

@ -68,14 +68,6 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
}
}
status = vsp1_read(vsp1, VI6_DISP_IRQ_STA);
vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST);
if (status & VI6_DISP_IRQ_STA_DST) {
vsp1_drm_display_start(vsp1);
ret = IRQ_HANDLED;
}
return ret;
}
@ -92,6 +84,10 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
*
* - from a UDS to a UDS (UDS entities can't be chained)
* - from an entity to itself (no loops are allowed)
*
* Furthermore, the BRS can't be connected to histogram generators, but no
* special check is currently needed as all VSP instances that include a BRS
* have no histogram generator.
*/
static int vsp1_create_sink_links(struct vsp1_device *vsp1,
struct vsp1_entity *sink)
@ -129,7 +125,7 @@ static int vsp1_create_sink_links(struct vsp1_device *vsp1,
return ret;
if (flags & MEDIA_LNK_FL_ENABLED)
source->sink = entity;
source->sink = sink;
}
}
@ -172,10 +168,13 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
return ret;
}
if (vsp1->lif) {
ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
for (i = 0; i < vsp1->info->lif_count; ++i) {
if (!vsp1->lif[i])
continue;
ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity,
RWPF_PAD_SOURCE,
&vsp1->lif->entity.subdev.entity,
&vsp1->lif[i]->entity.subdev.entity,
LIF_PAD_SINK, 0);
if (ret < 0)
return ret;
@ -269,8 +268,18 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
if (vsp1->info->features & VSP1_HAS_BRS) {
vsp1->brs = vsp1_bru_create(vsp1, VSP1_ENTITY_BRS);
if (IS_ERR(vsp1->brs)) {
ret = PTR_ERR(vsp1->brs);
goto done;
}
list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
}
if (vsp1->info->features & VSP1_HAS_BRU) {
vsp1->bru = vsp1_bru_create(vsp1);
vsp1->bru = vsp1_bru_create(vsp1, VSP1_ENTITY_BRU);
if (IS_ERR(vsp1->bru)) {
ret = PTR_ERR(vsp1->bru);
goto done;
@ -328,18 +337,23 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/*
* The LIF is only supported when used in conjunction with the DU, in
* The LIFs are only supported when used in conjunction with the DU, in
* which case the userspace API is disabled. If the userspace API is
* enabled skip the LIF, even when present.
* enabled skip the LIFs, even when present.
*/
if (vsp1->info->features & VSP1_HAS_LIF && !vsp1->info->uapi) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
ret = PTR_ERR(vsp1->lif);
goto done;
}
if (!vsp1->info->uapi) {
for (i = 0; i < vsp1->info->lif_count; ++i) {
struct vsp1_lif *lif;
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
lif = vsp1_lif_create(vsp1, i);
if (IS_ERR(lif)) {
ret = PTR_ERR(lif);
goto done;
}
vsp1->lif[i] = lif;
list_add_tail(&lif->entity.list_dev, &vsp1->entities);
}
}
if (vsp1->info->features & VSP1_HAS_LUT) {
@ -420,7 +434,6 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
list_add_tail(&video->list, &vsp1->videos);
wpf->entity.sink = &video->video.entity;
}
}
@ -432,19 +445,15 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
goto done;
}
/* Create links. */
if (vsp1->info->uapi)
ret = vsp1_uapi_create_links(vsp1);
else
ret = vsp1_drm_create_links(vsp1);
if (ret < 0)
goto done;
/*
* Register subdev nodes if the userspace API is enabled or initialize
* the DRM pipeline otherwise.
* Create links and register subdev nodes if the userspace API is
* enabled or initialize the DRM pipeline otherwise.
*/
if (vsp1->info->uapi) {
ret = vsp1_uapi_create_links(vsp1);
if (ret < 0)
goto done;
ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
if (ret < 0)
goto done;
@ -515,6 +524,9 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
if (vsp1->info->features & VSP1_HAS_BRS)
vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
@ -634,8 +646,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
.model = "VSP1-D",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LIF
| VSP1_HAS_LUT,
.features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
.wpf_count = 1,
@ -668,8 +680,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
.model = "VSP1V-D",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
| VSP1_HAS_LIF,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
.wpf_count = 1,
@ -705,11 +717,38 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.wpf_count = 1,
.num_bru_inputs = 5,
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPBS_GEN3,
.model = "VSP2-BS",
.gen = 3,
.features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP,
.rpf_count = 2,
.wpf_count = 1,
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
.model = "VSP2-D",
.gen = 3,
.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP,
.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
.lif_count = 1,
.rpf_count = 5,
.wpf_count = 2,
.num_bru_inputs = 5,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_V3,
.model = "VSP2-D",
.gen = 3,
.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
.lif_count = 1,
.rpf_count = 5,
.wpf_count = 1,
.num_bru_inputs = 5,
}, {
.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
.model = "VSP2-DL",
.gen = 3,
.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
.lif_count = 2,
.rpf_count = 5,
.wpf_count = 2,
.num_bru_inputs = 5,

View File

@ -24,18 +24,12 @@
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
static inline struct vsp1_entity *
media_entity_to_vsp1_entity(struct media_entity *entity)
{
return container_of(entity, struct vsp1_entity, subdev.entity);
}
void vsp1_entity_route_setup(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl)
{
struct vsp1_entity *source;
struct vsp1_entity *sink;
u32 route;
if (entity->type == VSP1_ENTITY_HGO) {
u32 smppt;
@ -44,7 +38,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
* The HGO is a special case, its routing is configured on the
* sink pad.
*/
source = media_entity_to_vsp1_entity(entity->sources[0]);
source = entity->sources[0];
smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
| (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
@ -57,7 +51,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
* The HGT is a special case, its routing is configured on the
* sink pad.
*/
source = media_entity_to_vsp1_entity(entity->sources[0]);
source = entity->sources[0];
smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
| (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
@ -69,9 +63,14 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
if (source->route->reg == 0)
return;
sink = media_entity_to_vsp1_entity(source->sink);
vsp1_dl_list_write(dl, source->route->reg,
sink->route->inputs[source->sink_pad]);
route = source->sink->route->inputs[source->sink_pad];
/*
* The ILV and BRS share the same data path route. The extra BRSSEL bit
* selects between the ILV and BRS.
*/
if (source->type == VSP1_ENTITY_BRS)
route |= VI6_DPR_ROUTE_BRSSEL;
vsp1_dl_list_write(dl, source->route->reg, route);
}
/* -----------------------------------------------------------------------------
@ -316,6 +315,12 @@ done:
* Media Operations
*/
static inline struct vsp1_entity *
media_entity_to_vsp1_entity(struct media_entity *entity)
{
return container_of(entity, struct vsp1_entity, subdev.entity);
}
static int vsp1_entity_link_setup_source(const struct media_pad *source_pad,
const struct media_pad *sink_pad,
u32 flags)
@ -339,7 +344,7 @@ static int vsp1_entity_link_setup_source(const struct media_pad *source_pad,
sink->type != VSP1_ENTITY_HGT) {
if (source->sink)
return -EBUSY;
source->sink = sink_pad->entity;
source->sink = sink;
source->sink_pad = sink_pad->index;
}
} else {
@ -355,15 +360,17 @@ static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad,
u32 flags)
{
struct vsp1_entity *sink;
struct vsp1_entity *source;
sink = media_entity_to_vsp1_entity(sink_pad->entity);
source = media_entity_to_vsp1_entity(source_pad->entity);
if (flags & MEDIA_LNK_FL_ENABLED) {
/* Fan-in is limited to one. */
if (sink->sources[sink_pad->index])
return -EBUSY;
sink->sources[sink_pad->index] = source_pad->entity;
sink->sources[sink_pad->index] = source;
} else {
sink->sources[sink_pad->index] = NULL;
}
@ -450,6 +457,8 @@ struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
{ VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
static const struct vsp1_route vsp1_routes[] = {
{ VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE,
{ VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 },
{ VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
{ VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
@ -459,7 +468,8 @@ static const struct vsp1_route vsp1_routes[] = {
{ VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 },
VSP1_ENTITY_ROUTE(HSI),
VSP1_ENTITY_ROUTE(HST),
{ VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, }, VI6_DPR_NODE_LIF },
{ VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 },
{ VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 },
VSP1_ENTITY_ROUTE(LUT),
VSP1_ENTITY_ROUTE_RPF(0),
VSP1_ENTITY_ROUTE_RPF(1),

View File

@ -23,6 +23,7 @@ struct vsp1_dl_list;
struct vsp1_pipeline;
enum vsp1_entity_type {
VSP1_ENTITY_BRS,
VSP1_ENTITY_BRU,
VSP1_ENTITY_CLU,
VSP1_ENTITY_HGO,
@ -104,8 +105,8 @@ struct vsp1_entity {
struct media_pad *pads;
unsigned int source_pad;
struct media_entity **sources;
struct media_entity *sink;
struct vsp1_entity **sources;
struct vsp1_entity *sink;
unsigned int sink_pad;
struct v4l2_subdev subdev;

View File

@ -30,7 +30,7 @@
static inline void vsp1_lif_write(struct vsp1_lif *lif, struct vsp1_dl_list *dl,
u32 reg, u32 data)
{
vsp1_dl_list_write(dl, reg, data);
vsp1_dl_list_write(dl, reg + lif->entity.index * VI6_LIF_OFFSET, data);
}
/* -----------------------------------------------------------------------------
@ -165,7 +165,7 @@ static const struct vsp1_entity_operations lif_entity_ops = {
* Initialization and Cleanup
*/
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index)
{
struct vsp1_lif *lif;
int ret;
@ -176,6 +176,7 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
lif->entity.ops = &lif_entity_ops;
lif->entity.type = VSP1_ENTITY_LIF;
lif->entity.index = index;
/*
* The LIF is never exposed to userspace, but media entity registration

View File

@ -32,6 +32,6 @@ static inline struct vsp1_lif *to_lif(struct v4l2_subdev *subdev)
return container_of(subdev, struct vsp1_lif, entity.subdev);
}
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1);
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index);
#endif /* __VSP1_LIF_H__ */

View File

@ -335,16 +335,12 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
if (pipe == NULL)
return;
/*
* If the DL commit raced with the frame end interrupt, the commit ends
* up being postponed by one frame. @completed represents whether the
* active frame was finished or postponed.
*/
completed = vsp1_dlm_irq_frame_end(pipe->output->dlm);
if (!completed) {
/*
* If the DL commit raced with the frame end interrupt, the
* commit ends up being postponed by one frame. Return
* immediately without calling the pipeline's frame end handler
* or incrementing the sequence number.
*/
return;
}
if (pipe->hgo)
vsp1_hgo_frame_end(pipe->hgo);
@ -352,8 +348,12 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
if (pipe->hgt)
vsp1_hgt_frame_end(pipe->hgt);
/*
* Regardless of frame completion we still need to notify the pipe
* frame_end to account for vblank events.
*/
if (pipe->frame_end)
pipe->frame_end(pipe);
pipe->frame_end(pipe, completed);
pipe->sequence++;
}
@ -373,10 +373,11 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
return;
/*
* The BRU background color has a fixed alpha value set to 255, the
* output alpha value is thus always equal to 255.
* The BRU and BRS background color has a fixed alpha value set to 255,
* the output alpha value is thus always equal to 255.
*/
if (pipe->uds_input->type == VSP1_ENTITY_BRU)
if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
pipe->uds_input->type == VSP1_ENTITY_BRS)
alpha = 255;
vsp1_uds_set_alpha(pipe->uds, dl, alpha);

View File

@ -91,7 +91,7 @@ struct vsp1_pipeline {
enum vsp1_pipeline_state state;
wait_queue_head_t wq;
void (*frame_end)(struct vsp1_pipeline *pipe);
void (*frame_end)(struct vsp1_pipeline *pipe, bool completed);
struct mutex lock;
struct kref kref;

View File

@ -18,6 +18,7 @@
*/
#define VI6_CMD(n) (0x0000 + (n) * 4)
#define VI6_CMD_UPDHDR (1 << 4)
#define VI6_CMD_STRCMD (1 << 0)
#define VI6_CLK_DCSWT 0x0018
@ -238,6 +239,10 @@
#define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28)
#define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28)
#define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28)
#define VI6_WPF_SRCRPF_VIRACT2_DIS (0 << 24)
#define VI6_WPF_SRCRPF_VIRACT2_SUB (1 << 24)
#define VI6_WPF_SRCRPF_VIRACT2_MST (2 << 24)
#define VI6_WPF_SRCRPF_VIRACT2_MASK (3 << 24)
#define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2))
#define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2))
#define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2))
@ -321,6 +326,8 @@
#define VI6_DPR_HST_ROUTE 0x2044
#define VI6_DPR_HSI_ROUTE 0x2048
#define VI6_DPR_BRU_ROUTE 0x204c
#define VI6_DPR_ILV_BRS_ROUTE 0x2050
#define VI6_DPR_ROUTE_BRSSEL (1 << 28)
#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
#define VI6_DPR_ROUTE_FXA_SHIFT 16
#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
@ -344,7 +351,8 @@
#define VI6_DPR_NODE_CLU 29
#define VI6_DPR_NODE_HST 30
#define VI6_DPR_NODE_HSI 31
#define VI6_DPR_NODE_LIF 55
#define VI6_DPR_NODE_BRS_IN(n) (38 + (n))
#define VI6_DPR_NODE_LIF 55 /* Gen2 only */
#define VI6_DPR_NODE_WPF(n) (56 + (n))
#define VI6_DPR_NODE_UNUSED 63
@ -476,7 +484,7 @@
#define VI6_HSI_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* BRU Control Registers
* BRS and BRU Control Registers
*/
#define VI6_ROP_NOP 0
@ -496,7 +504,10 @@
#define VI6_ROP_NAND 14
#define VI6_ROP_SET 15
#define VI6_BRU_INCTRL 0x2c00
#define VI6_BRU_BASE 0x2c00
#define VI6_BRS_BASE 0x3900
#define VI6_BRU_INCTRL 0x0000
#define VI6_BRU_INCTRL_NRM (1 << 28)
#define VI6_BRU_INCTRL_DnON (1 << (16 + (n)))
#define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4))
@ -508,19 +519,19 @@
#define VI6_BRU_INCTRL_DITHn_MASK (7 << ((n) * 4))
#define VI6_BRU_INCTRL_DITHn_SHIFT ((n) * 4)
#define VI6_BRU_VIRRPF_SIZE 0x2c04
#define VI6_BRU_VIRRPF_SIZE 0x0004
#define VI6_BRU_VIRRPF_SIZE_HSIZE_MASK (0x1fff << 16)
#define VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT 16
#define VI6_BRU_VIRRPF_SIZE_VSIZE_MASK (0x1fff << 0)
#define VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT 0
#define VI6_BRU_VIRRPF_LOC 0x2c08
#define VI6_BRU_VIRRPF_LOC 0x0008
#define VI6_BRU_VIRRPF_LOC_HCOORD_MASK (0x1fff << 16)
#define VI6_BRU_VIRRPF_LOC_HCOORD_SHIFT 16
#define VI6_BRU_VIRRPF_LOC_VCOORD_MASK (0x1fff << 0)
#define VI6_BRU_VIRRPF_LOC_VCOORD_SHIFT 0
#define VI6_BRU_VIRRPF_COL 0x2c0c
#define VI6_BRU_VIRRPF_COL 0x000c
#define VI6_BRU_VIRRPF_COL_A_MASK (0xff << 24)
#define VI6_BRU_VIRRPF_COL_A_SHIFT 24
#define VI6_BRU_VIRRPF_COL_RCR_MASK (0xff << 16)
@ -530,7 +541,7 @@
#define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0)
#define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0
#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_CTRL_RBC (1 << 31)
#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
#define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20)
@ -543,7 +554,7 @@
#define VI6_BRU_CTRL_AROP(rop) ((rop) << 0)
#define VI6_BRU_CTRL_AROP_MASK (0xf << 0)
#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_BLD_CBES (1 << 31)
#define VI6_BRU_BLD_CCMDX_DST_A (0 << 28)
#define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28)
@ -576,7 +587,7 @@
#define VI6_BRU_BLD_COEFY_MASK (0xff << 0)
#define VI6_BRU_BLD_COEFY_SHIFT 0
#define VI6_BRU_ROP 0x2c30
#define VI6_BRU_ROP 0x0030 /* Only available on BRU */
#define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
#define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20)
#define VI6_BRU_ROP_DSTSEL_MASK (7 << 20)
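
With the blend unit control registers above turned into offsets relative to a per-instance base (VI6_BRU_BASE or VI6_BRS_BASE), the register accessor in vsp1_bru.c presumably mirrors the vsp1_lif_write() pattern shown later in this series. A minimal sketch, assuming the BRU/BRS driver caches its base address in a 'base' field (that field name is an assumption; the hunk is not part of this excerpt):

static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl,
				  u32 reg, u32 data)
{
	/* 'base' would be VI6_BRU_BASE for a BRU and VI6_BRS_BASE for a BRS. */
	vsp1_dl_list_write(dl, bru->base + reg, data);
}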
@ -653,6 +664,8 @@
* LIF Control Registers
*/
#define VI6_LIF_OFFSET (-0x100)
#define VI6_LIF_CTRL 0x3b00
#define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16)
#define VI6_LIF_CTRL_OBTH_SHIFT 16
@ -689,9 +702,20 @@
#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8)
#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8)
#define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8)
#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H (0x01 << 0)
#define VI6_IP_VERSION_SOC_M (0x02 << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
#define VI6_IP_VERSION_SOC_V3M (0x01 << 0)
#define VI6_IP_VERSION_SOC_M2 (0x02 << 0)
#define VI6_IP_VERSION_SOC_M3W (0x02 << 0)
#define VI6_IP_VERSION_SOC_V3H (0x02 << 0)
#define VI6_IP_VERSION_SOC_H3 (0x03 << 0)
#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
/* -----------------------------------------------------------------------------
* RPF CLUT Registers

View File

@ -440,13 +440,17 @@ static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
vsp1_pipeline_run(pipe);
}
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
bool completed)
{
struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
enum vsp1_pipeline_state state;
unsigned long flags;
unsigned int i;
/* M2M Pipelines should never call here with an incomplete frame. */
WARN_ON_ONCE(!completed);
spin_lock_irqsave(&pipe->irqlock, flags);
/* Complete buffers on all video nodes. */
@ -481,7 +485,7 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
struct media_entity_enum ent_enum;
struct vsp1_entity *entity;
struct media_pad *pad;
bool bru_found = false;
struct vsp1_bru *bru = NULL;
int ret;
ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
@ -511,16 +515,20 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
media_entity_to_v4l2_subdev(pad->entity));
/*
* A BRU is present in the pipeline, store the BRU input pad
* A BRU or BRS is present in the pipeline, store its input pad
* number in the input RPF for use when configuring the RPF.
*/
if (entity->type == VSP1_ENTITY_BRU) {
struct vsp1_bru *bru = to_bru(&entity->subdev);
if (entity->type == VSP1_ENTITY_BRU ||
entity->type == VSP1_ENTITY_BRS) {
/* BRU and BRS can't be chained. */
if (bru) {
ret = -EPIPE;
goto out;
}
bru = to_bru(&entity->subdev);
bru->inputs[pad->index].rpf = input;
input->bru_input = pad->index;
bru_found = true;
}
/* We've reached the WPF, we're done. */
@ -542,8 +550,7 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
}
pipe->uds = entity;
pipe->uds_input = bru_found ? pipe->bru
: &input->entity;
pipe->uds_input = bru ? &bru->entity : &input->entity;
}
/* Follow the source link, ignoring any HGO or HGT. */
@ -589,30 +596,42 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
e = to_vsp1_entity(subdev);
list_add_tail(&e->list_pipe, &pipe->entities);
if (e->type == VSP1_ENTITY_RPF) {
switch (e->type) {
case VSP1_ENTITY_RPF:
rwpf = to_rwpf(subdev);
pipe->inputs[rwpf->entity.index] = rwpf;
rwpf->video->pipe_index = ++pipe->num_inputs;
rwpf->pipe = pipe;
} else if (e->type == VSP1_ENTITY_WPF) {
break;
case VSP1_ENTITY_WPF:
rwpf = to_rwpf(subdev);
pipe->output = rwpf;
rwpf->video->pipe_index = 0;
rwpf->pipe = pipe;
} else if (e->type == VSP1_ENTITY_LIF) {
break;
case VSP1_ENTITY_LIF:
pipe->lif = e;
} else if (e->type == VSP1_ENTITY_BRU) {
break;
case VSP1_ENTITY_BRU:
case VSP1_ENTITY_BRS:
pipe->bru = e;
} else if (e->type == VSP1_ENTITY_HGO) {
struct vsp1_hgo *hgo = to_hgo(subdev);
break;
case VSP1_ENTITY_HGO:
pipe->hgo = e;
hgo->histo.pipe = pipe;
} else if (e->type == VSP1_ENTITY_HGT) {
struct vsp1_hgt *hgt = to_hgt(subdev);
to_hgo(subdev)->histo.pipe = pipe;
break;
case VSP1_ENTITY_HGT:
pipe->hgt = e;
hgt->histo.pipe = pipe;
to_hgt(subdev)->histo.pipe = pipe;
break;
default:
break;
}
}
@ -796,12 +815,14 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
/*
* If a BRU is present in the pipeline before the UDS, the alpha
* component doesn't need to be scaled as the BRU output alpha
* value is fixed to 255. Otherwise we need to scale the alpha
* component only when available at the input RPF.
* If a BRU or BRS is present in the pipeline before the UDS,
* the alpha component doesn't need to be scaled as the BRU and
* BRS output alpha value is fixed to 255. Otherwise we need to
* scale the alpha component only when available at the input
* RPF.
*/
if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
pipe->uds_input->type == VSP1_ENTITY_BRS) {
uds->scale_alpha = false;
} else {
struct vsp1_rwpf *rpf =

View File

@ -453,7 +453,9 @@ static void wpf_configure(struct vsp1_entity *entity,
}
if (pipe->bru || pipe->num_inputs > 1)
srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU
? VI6_WPF_SRCRPF_VIRACT_MST
: VI6_WPF_SRCRPF_VIRACT2_MST;
vsp1_wpf_write(wpf, dl, VI6_WPF_SRCRPF, srcrpf);

View File

@ -34,11 +34,12 @@ struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
void (*callback)(void *);
void (*callback)(void *, bool);
void *callback_data;
};
int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
const struct vsp1_du_lif_config *cfg);
struct vsp1_du_atomic_config {
u32 pixelformat;
@ -50,10 +51,11 @@ struct vsp1_du_atomic_config {
unsigned int zpos;
};
void vsp1_du_atomic_begin(struct device *dev);
int vsp1_du_atomic_update(struct device *dev, unsigned int rpf,
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index);
int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
unsigned int rpf,
const struct vsp1_du_atomic_config *cfg);
void vsp1_du_atomic_flush(struct device *dev);
void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index);
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
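
To illustrate the reworked API as a whole, here is a hedged usage sketch of how a DU CRTC driver would drive one VSP DRM pipeline with the per-pipeline entry points declared above. Only the vsp1_du_*() calls, the vsp1_du_lif_config fields and the (void *, bool) callback signature come from this header; the du_vsp_* names and the pipe index plumbing are illustrative:

#include <linux/device.h>
#include <media/vsp1.h>

/* Hypothetical completion handler matching the new (void *, bool) signature. */
static void du_vsp_complete(void *data, bool completed)
{
	/* Report vblank here and, when 'completed' is true, the page flip. */
}

static int du_vsp_enable(struct device *vsp, unsigned int pipe_index,
			 unsigned int width, unsigned int height, void *crtc)
{
	struct vsp1_du_lif_config cfg = {
		.width = width,
		.height = height,
		.callback = du_vsp_complete,
		.callback_data = crtc,
	};

	/* Passing a NULL config instead would disable the pipeline. */
	return vsp1_du_setup_lif(vsp, pipe_index, &cfg);
}

static void du_vsp_commit_plane(struct device *vsp, unsigned int pipe_index,
				unsigned int rpf,
				const struct vsp1_du_atomic_config *cfg)
{
	vsp1_du_atomic_begin(vsp, pipe_index);
	/* A NULL cfg disables the given RPF input. */
	vsp1_du_atomic_update(vsp, pipe_index, rpf, cfg);
	vsp1_du_atomic_flush(vsp, pipe_index);
}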