Merge tag 'drm-msm-next-2019-06-25' of https://gitlab.freedesktop.org/drm/msm into drm-next

+ usual progress on cleanups
+ dsi vs EPROBE_DEFER fixes
+ msm8998 (snapdragon 835) support
  + a540 gpu support (mesa support already landed)
  + dsi, dsi-phy support
+ mdp5 and dpu interconnect (bus/memory scaling) support (see the sketch after this list)
+ initial prep work for per-context pagetables (at least the parts that
  don't have external dependencies like iommu/arm-smmu)
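
A quick illustration for the interconnect bullet: the new mdp5/dpu code
votes bandwidth on the MDP->memory paths through the kernel interconnect
API. A minimal standalone sketch of that pattern, using the "mdp0-mem"
path name from the new DT binding and an illustrative bandwidth figure
rather than the drivers' exact votes:

	#include <linux/device.h>
	#include <linux/interconnect.h>

	/* Sketch only: vote display bandwidth on one icc path. */
	static int display_icc_vote(struct device *dev)
	{
		struct icc_path *path = of_icc_get(dev, "mdp0-mem");

		if (IS_ERR(path))
			return PTR_ERR(path);
		if (!path)	/* no "interconnects" DT property */
			return 0;

		/* avg_bw = 0, peak at an illustrative 6.8 GBps */
		icc_set_bw(path, 0, kBps_to_icc(6800000));

		/* ...display runs... then drop the vote on disable */
		icc_set_bw(path, 0, 0);
		icc_put(path);
		return 0;
	}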

There is one more patch to fix DSI cmd mode panels (part of a set of
patches to get things working on nexus5), but it would conflict with
1cff7440a8 in drm-next without a rebase or back-merge,
and since it doesn't conflict with anything in msm-next, I think it is
best if Sean merges it through drm-misc-fixes instead.

(In other news, I've been making some progress with getting efifb working
properly on the sdm850 laptop without horrible hacks, and with drm/msm +
clk not totally falling over when the bootloader enables the display and
things are already running when the driver probes... not quite ready yet,
but hopefully we can post some of that for 5.4. It should help for both
the sdm835 and sdm850 laptops.)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsj3N4XzDLSDoa+4RHZ9wXObYmhcep0M3LjnRg48BeLvg@mail.gmail.com
Committed by Dave Airlie on 2019-06-28 09:34:25 +10:00
commit 53e155f2bb
50 changed files with 717 additions and 540 deletions

View File

@ -28,6 +28,11 @@ Required properties:
- #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space.
- interconnects : interconnect path specifier for MDSS according to
Documentation/devicetree/bindings/interconnect/interconnect.txt. Should be
2 paths corresponding to 2 AXI ports.
- interconnect-names : MDSS will have 2 port names to differentiate between the
2 interconnect paths defined with interconnect specifier.
Optional properties:
- assigned-clocks: list of clock specifiers for clocks needing rate assignment
@ -86,6 +91,11 @@ Example:
interrupt-controller;
#interrupt-cells = <1>;
interconnects = <&rsc_hlos MASTER_MDP0 &rsc_hlos SLAVE_EBI1>,
<&rsc_hlos MASTER_MDP1 &rsc_hlos SLAVE_EBI1>;
interconnect-names = "mdp0-mem", "mdp1-mem";
iommus = <&apps_iommu 0>;
#address-cells = <2>;

View File

@ -88,6 +88,7 @@ Required properties:
* "qcom,dsi-phy-28nm-8960"
* "qcom,dsi-phy-14nm"
* "qcom,dsi-phy-10nm"
* "qcom,dsi-phy-10nm-8998"
- reg: Physical base address and length of the registers of PLL, PHY. Some
revisions require the PHY regulator base address, whereas others require the
PHY lane base address. See below for each PHY revision.

View File

@ -395,19 +395,17 @@ static const unsigned int a3xx_registers[] = {
0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
0x303c, 0x303c, 0x305e, 0x305f,
0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
~0 /* sentinel */
};

View File

@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
- /home/ubuntu/envytools/envytools/rnndb/./adreno.xml ( 501 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml ( 79608 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml ( 14239 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml ( 43155 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml ( 147291 bytes, from 2019-05-29 14:51:41)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml ( 148461 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2019-05-29 01:28:15)
- /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2019-05-29 01:28:15)
Copyright (C) 2013-2018 by the following authors:
Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@ -2148,6 +2148,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04
#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06

View File

@ -149,7 +149,6 @@ DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
struct dentry *ent;
int ret;
if (!minor)
@ -166,11 +165,8 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
return ret;
}
ent = debugfs_create_file("reset", S_IWUGO,
minor->debugfs_root,
dev, &reset_fops);
if (!ent)
return -ENOMEM;
debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
&reset_fops);
return 0;
}
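
The debugfs hunk above, like the dpu debugfs hunks further down, follows
the kernel-wide cleanup rule: debugfs_create_*() return values are not
checked, since the debugfs API is designed so callers can ignore failures
(an ERR_PTR parent is handled gracefully by subsequent calls). A minimal
sketch of the resulting idiom, with a hypothetical "foo" directory and
field:

	#include <linux/debugfs.h>

	/* Sketch: create-and-forget debugfs. No return checks; if the
	 * dir create fails, the child call degrades gracefully. */
	static void foo_debugfs_init(struct dentry *root, u32 *features)
	{
		struct dentry *dir = debugfs_create_dir("foo", root);

		debugfs_create_u32("features", 0600, dir, features);
	}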

View File

@ -309,12 +309,18 @@ static const struct {
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
gpu_write(gpu, a5xx_hwcg[i].offset,
state ? a5xx_hwcg[i].value : 0);
if (adreno_is_a540(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
}
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
@ -498,6 +504,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
if (adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
@ -558,7 +567,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
if (adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
@ -583,6 +595,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Set the highest bank bit */
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
if (adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@ -633,6 +647,30 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Put the GPU into 64 bit by default */
gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
/*
* VPC corner case with local memory load kill leads to corrupt
* internal state. Normal Disable does not work for all a5x chips.
* So do the following setting to disable it.
*/
if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
}
ret = adreno_hw_init(gpu);
if (ret)
return ret;

View File

@ -23,6 +23,18 @@
#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE
/* AGC_LM_CONFIG (A540+) */
#define AGC_LM_CONFIG (136/4)
#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
#define AGC_LEVEL_CONFIG (140/4)
static struct {
uint32_t reg;
uint32_t value;
@ -107,7 +119,7 @@ static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
}
/* Setup thermal limit management */
static void a5xx_lm_setup(struct msm_gpu *gpu)
static void a530_lm_setup(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@ -156,6 +168,45 @@ static void a5xx_lm_setup(struct msm_gpu *gpu)
gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
#define LM_DCVS_LIMIT 1
#define LEVEL_CONFIG ~(0x303)
static void a540_lm_setup(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u32 config;
/* The battery current limiter isn't enabled for A540 */
config = AGC_LM_CONFIG_BCL_DISABLED;
config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
/* For now disable GPMU side throttling */
config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
/* Until we get clock scaling 0 is always the active power level */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
/* Fixed at 6000 for now */
gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
@ -197,7 +248,8 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
return -EINVAL;
}
gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
/* Kick off the GPMU */
gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
@ -211,12 +263,26 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
DRM_ERROR("%s: GPMU firmware initialization timed out\n",
gpu->name);
if (!adreno_is_a530(adreno_gpu)) {
u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
if (val)
DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
gpu->name, val);
}
return 0;
}
/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* This init sequence only applies to A530 */
if (!adreno_is_a530(adreno_gpu))
return;
gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
@ -228,10 +294,14 @@ static void a5xx_lm_enable(struct msm_gpu *gpu)
int a5xx_power_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Set up the limits management */
a5xx_lm_setup(gpu);
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
else
a540_lm_setup(gpu);
/* Set up SP/TP power collapse */
a5xx_pc_init(gpu);

View File

@ -74,7 +74,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (IS_ERR_OR_NULL(gmu->mmio))
if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@ -90,7 +90,7 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (IS_ERR_OR_NULL(gmu->mmio))
if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@ -504,8 +504,10 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
wmb();
err:
devm_iounmap(gmu->dev, pdcptr);
devm_iounmap(gmu->dev, seqptr);
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
if (!IS_ERR_OR_NULL(seqptr))
iounmap(seqptr);
}
/*
@ -695,7 +697,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
return 0;
gmu->hung = false;
@ -765,7 +767,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
u32 reg;
if (!gmu->mmio)
if (!gmu->initialized)
return true;
reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
@ -1195,7 +1197,7 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
}
ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
ret = ioremap(res->start, resource_size(res));
if (!ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
@ -1211,10 +1213,10 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
irq = platform_get_irq_byname(pdev, name);
ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
name, gmu);
ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
name, ret);
return ret;
}
@ -1227,27 +1229,35 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
if (IS_ERR_OR_NULL(gmu->mmio))
if (!gmu->initialized)
return;
a6xx_gmu_stop(a6xx_gpu);
pm_runtime_disable(gmu->dev);
pm_runtime_force_suspend(gmu->dev);
if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
}
a6xx_gmu_irq_disable(gmu);
iounmap(gmu->mmio);
gmu->mmio = NULL;
a6xx_gmu_memory_free(gmu, gmu->hfi);
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
gmu->initialized = false;
}
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
@ -1268,34 +1278,34 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
if (ret)
return ret;
goto err_put_device;
/* Set up the IOMMU context bank */
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
return ret;
goto err_put_device;
/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
goto err;
goto err_memory;
/* Allocate memory for the GMU debug region */
gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->debug))
goto err;
goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
if (IS_ERR(gmu->mmio))
goto err;
goto err_memory;
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
goto err_mmio;
/*
* Get a link to the GX power domain to reset the GPU in case of GMU
@ -1309,8 +1319,15 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
gmu->initialized = true;
return 0;
err:
err_mmio:
iounmap(gmu->mmio);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
if (gmu->domain) {
@ -1318,6 +1335,11 @@ err:
iommu_domain_free(gmu->domain);
}
ret = -ENODEV;
return -ENODEV;
err_put_device:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
return ret;
}

View File

@ -75,6 +75,7 @@ struct a6xx_gmu {
struct a6xx_hfi_queue queues[2];
bool initialized;
bool hung;
};

View File

@ -391,6 +391,20 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Turn on 64 bit addressing for all blocks */
gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
@ -854,7 +868,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
ret = a6xx_gmu_probe(a6xx_gpu, node);
ret = a6xx_gmu_init(a6xx_gpu, node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);

View File

@ -53,7 +53,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);

View File

@ -144,6 +144,24 @@ static const struct adreno_info gpulist[] = {
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
.rev = ADRENO_REV(5, 4, 0, 2),
.revn = 540,
.name = "A540",
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
@ -351,7 +369,7 @@ static void adreno_unbind(struct device *dev, struct device *master,
{
struct msm_gpu *gpu = dev_get_drvdata(dev);
gpu->funcs->pm_suspend(gpu);
pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
set_gpu_pdev(dev_get_drvdata(master), NULL);

View File

@ -67,7 +67,6 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
return ret;
mem_phys = r.start;
mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
@ -83,6 +82,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
goto out;
}
if (mem_size > resource_size(&r)) {
DRM_DEV_ERROR(dev,
"memory region is too small to load the MDT\n");
ret = -E2BIG;
goto out;
}
/* Allocate memory for the firmware image */
mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {

View File

@ -61,6 +61,7 @@ enum {
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
};
struct adreno_rev {
@ -221,6 +222,11 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
return gpu->revn == 530;
}
static inline int adreno_is_a540(struct adreno_gpu *gpu)
{
return gpu->revn == 540;
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);

View File

@ -69,7 +69,6 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct dpu_core_perf_params *perf)
{
struct dpu_crtc_state *dpu_cstate;
int i;
if (!kms || !kms->catalog || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
@ -80,35 +79,24 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
perf->bw_ctl = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->max_per_pipe_ib = perf->bw_ctl;
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->bw_ctl = 0;
perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
perf->bw_ctl = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
}
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
perf->max_per_pipe_ib, perf->bw_ctl);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@ -121,7 +109,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
int i;
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@ -143,31 +130,25 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) && (tmp_crtc != crtc)) {
struct dpu_crtc_state *tmp_cstate =
to_dpu_crtc_state(tmp_crtc->state);
DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id,
tmp_cstate->new_perf.bw_ctl[i],
tmp_cstate->bw_control);
/*
* For bw check only use the bw if the
* atomic property has been already set
*/
if (tmp_cstate->bw_control)
bw_sum_of_intfs +=
tmp_cstate->new_perf.bw_ctl[i];
}
DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
/*
* For bw check only use the bw if the
* atomic property has been already set
*/
if (tmp_cstate->bw_control)
bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
/* convert bandwidth to kb */
@ -198,9 +179,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
struct drm_crtc *crtc, u32 bus_id)
struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { { 0 } };
struct dpu_core_perf_params perf = { 0 };
enum dpu_crtc_client_type curr_client_type
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
@ -213,13 +194,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
perf.max_per_pipe_ib[bus_id] =
max(perf.max_per_pipe_ib[bus_id],
dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
dpu_cstate->new_perf.max_per_pipe_ib);
DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
tmp_crtc->base.id, bus_id,
dpu_cstate->new_perf.bw_ctl[bus_id]);
DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
dpu_cstate->new_perf.bw_ctl);
}
}
return ret;
@ -239,7 +218,6 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
int i;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@ -275,10 +253,8 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
dpu_crtc->cur_perf.bw_ctl = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc);
}
}
@ -321,11 +297,10 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed, bool stop_req)
{
struct dpu_core_perf_params *new, *old;
int update_bus = 0, update_clk = 0;
bool update_bus = false, update_clk = false;
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
int i;
struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
@ -352,62 +327,49 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
new = &dpu_cstate->new_perf;
if (crtc->enabled && !stop_req) {
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
* than current vote for update request.
* 2. new bandwidth vote - "ab or ib vote" is lower
* than current vote at end of commit or stop.
*/
if ((params_changed && ((new->bw_ctl[i] >
old->bw_ctl[i]) ||
(new->max_per_pipe_ib[i] >
old->max_per_pipe_ib[i]))) ||
(!params_changed && ((new->bw_ctl[i] <
old->bw_ctl[i]) ||
(new->max_per_pipe_ib[i] <
old->max_per_pipe_ib[i])))) {
DPU_DEBUG(
"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl[i], old->bw_ctl[i]);
old->bw_ctl[i] = new->bw_ctl[i];
old->max_per_pipe_ib[i] =
new->max_per_pipe_ib[i];
update_bus |= BIT(i);
}
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
* than current vote for update request.
* 2. new bandwidth vote - "ab or ib vote" is lower
* than current vote at end of commit or stop.
*/
if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
(new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
(!params_changed && ((new->bw_ctl < old->bw_ctl) ||
(new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
crtc->base.id, params_changed,
new->bw_ctl, old->bw_ctl);
old->bw_ctl = new->bw_ctl;
old->max_per_pipe_ib = new->max_per_pipe_ib;
update_bus = true;
}
if ((params_changed &&
(new->core_clk_rate > old->core_clk_rate)) ||
(!params_changed &&
(new->core_clk_rate < old->core_clk_rate))) {
(new->core_clk_rate > old->core_clk_rate)) ||
(!params_changed &&
(new->core_clk_rate < old->core_clk_rate))) {
old->core_clk_rate = new->core_clk_rate;
update_clk = 1;
update_clk = true;
}
} else {
DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
memset(new, 0, sizeof(*new));
update_bus = ~0;
update_clk = 1;
update_bus = true;
update_clk = true;
}
trace_dpu_perf_crtc_update(crtc->base.id,
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
crtc->base.id, i);
return ret;
}
trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
new->core_clk_rate, stop_req, update_bus, update_clk);
if (update_bus) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
if (ret) {
DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
crtc->base.id);
return ret;
}
}
@ -498,8 +460,6 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
struct dentry *entry;
entry = debugfs_create_dir("core_perf", parent);
if (IS_ERR_OR_NULL(entry))
return -EINVAL;
debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);

View File

@ -34,8 +34,8 @@ enum dpu_core_perf_data_bus_id {
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 max_per_pipe_ib;
u64 bw_ctl;
u64 core_clk_rate;
};

View File

@ -1233,19 +1233,14 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
int i;
seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
seq_printf(s, "bw_ctl[%d]: %llu\n", i,
dpu_crtc->cur_perf.bw_ctl[i]);
seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
seq_printf(s, "max_per_pipe_ib: %llu\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
@ -1264,10 +1259,7 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
if (!dpu_crtc->debugfs_root)
return -ENOMEM;
/* don't error check these */
debugfs_create_file("status", 0400,
dpu_crtc->debugfs_root,
dpu_crtc, &debugfs_status_fops);

View File

@ -622,9 +622,6 @@ static int dpu_encoder_virt_atomic_check(
}
}
if (!ret)
drm_mode_set_crtcinfo(adj_mode, 0);
trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
adj_mode->private_flags);
@ -1985,8 +1982,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
/* create overall sub-directory for the encoder */
dpu_enc->debugfs_root = debugfs_create_dir(name,
drm_enc->dev->primary->debugfs_root);
if (!dpu_enc->debugfs_root)
return -ENOMEM;
/* don't error check these */
debugfs_create_file("status", 0600,

View File

@ -470,90 +470,6 @@ static const struct dpu_format dpu_format_map[] = {
DPU_FETCH_LINEAR, 3),
};
/*
* A5x tile formats tables:
* These tables hold the A5x tile formats supported.
*/
static const struct dpu_format dpu_format_map_tile[] = {
INTERLEAVED_RGB_FMT_TILED(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, DPU_FORMAT_FLAG_DX,
DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
PSEUDO_YUV_FMT_TILED(NV21,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C1_B_Cb,
DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
};
/*
* UBWC formats table:
* This table holds the UBWC formats supported.
@ -599,32 +515,6 @@ static const struct dpu_format dpu_format_map_ubwc[] = {
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
};
static const struct dpu_format dpu_format_map_p010[] = {
PSEUDO_YUV_FMT_LOOSE(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
DPU_FETCH_LINEAR, 2),
};
static const struct dpu_format dpu_format_map_p010_ubwc[] = {
PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
DPU_FORMAT_FLAG_COMPRESSED),
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
};
static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
PSEUDO_YUV_FMT_TILED(NV12,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C1_B_Cb, C2_R_Cr,
DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
DPU_FORMAT_FLAG_COMPRESSED),
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
};
/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
* Note: Not using the drm_format_*_subsampling since we have formats
*/

View File

@ -106,9 +106,9 @@ int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
rc = -EPERM;
}
if (rc) {
msm_dss_enable_clk(&clk_arry[i],
i, false);
if (rc && i) {
msm_dss_enable_clk(&clk_arry[i - 1],
i - 1, false);
break;
}
}
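
The hunk above corrects which clocks are rolled back when enabling fails
partway through the array: only the entries already enabled should be
disabled again. The general idiom (enable forward, unwind in reverse on
the first failure) as a standalone sketch using the plain clk API rather
than the dss_clk wrappers:

	#include <linux/clk.h>

	/* Sketch: enable num clocks; on failure at index i, disable
	 * only the i already-enabled ones, in reverse order. */
	static int enable_clocks(struct clk **clks, int num)
	{
		int i, ret;

		for (i = 0; i < num; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)
			clk_disable_unprepare(clks[i]);
		return ret;
	}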

View File

@ -56,7 +56,7 @@ static const char * const iommu_ports[] = {
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
static int dpu_kms_hw_init(struct msm_kms *kms);
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
static unsigned long dpu_iomap_size(struct platform_device *pdev,
const char *name)
@ -142,8 +142,6 @@ static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
struct dentry *entry = debugfs_create_dir("danger", parent);
if (IS_ERR_OR_NULL(entry))
return;
debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
@ -218,32 +216,29 @@ void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
}
}
void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent, struct dpu_debugfs_regset32 *regset)
{
if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
return NULL;
return;
/* make sure offset is a multiple of 4 */
regset->offset = round_down(regset->offset, 4);
return debugfs_create_file(name, mode, parent,
regset, &dpu_fops_regset32);
debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
if (!p)
return -EINVAL;
entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
if (IS_ERR_OR_NULL(entry))
return -ENODEV;
entry = debugfs_create_dir("debug", minor->debugfs_root);
/* allow root to be NULL */
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
dpu_debugfs_danger_init(dpu_kms, entry);
@ -578,13 +573,6 @@ fail:
return ret;
}
#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
@ -725,17 +713,20 @@ static const struct msm_kms_funcs kms_funcs = {
#endif
};
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
if (!dpu_kms->base.aspace)
return;
mmu = dpu_kms->base.aspace->mmu;
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
msm_gem_address_space_put(dpu_kms->base.aspace);
return 0;
dpu_kms->base.aspace = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
@ -754,25 +745,20 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
goto fail;
iommu_domain_free(domain);
return PTR_ERR(aspace);
}
dpu_kms->base.aspace = aspace;
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
DPU_ERROR("failed to attach iommu %d\n", ret);
msm_gem_address_space_put(aspace);
goto fail;
return ret;
}
dpu_kms->base.aspace = aspace;
return 0;
fail:
_dpu_kms_mmu_destroy(dpu_kms);
return ret;
}
static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,

View File

@ -197,12 +197,8 @@ void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
* @mode: File mode within debugfs
* @parent: Parent directory entry within debugfs, can be NULL
* @regset: Pointer to persistent register block definition
*
* Return: dentry pointer for newly created file, use either debugfs_remove()
* or debugfs_remove_recursive() (on a parent directory) to remove the
* file
*/
void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent, struct dpu_debugfs_regset32 *regset);
/**

View File

@ -4,11 +4,15 @@
*/
#include "dpu_kms.h"
#include <linux/interconnect.h>
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
#define HW_INTR_STATUS 0x0010
/* Max BW defined in KBps */
#define MAX_BW 6800000
struct dpu_irq_controller {
unsigned long enabled_mask;
struct irq_domain *domain;
@ -21,8 +25,40 @@ struct dpu_mdss {
u32 hwversion;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
struct icc_path *path[2];
u32 num_paths;
};
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
dpu_mdss->path[0] = path0;
dpu_mdss->num_paths = 1;
if (!IS_ERR_OR_NULL(path1)) {
dpu_mdss->path[1] = path1;
dpu_mdss->num_paths++;
}
return 0;
}
static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
int i;
u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
}
static void dpu_mdss_irq(struct irq_desc *desc)
{
struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
@ -136,6 +172,8 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
dpu_mdss_icc_request_bw(mdss);
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (ret)
DPU_ERROR("clock enable failed, ret:%d\n", ret);
@ -147,12 +185,15 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
int ret, i;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (ret)
DPU_ERROR("clock disable failed, ret:%d\n", ret);
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_set_bw(dpu_mdss->path[i], 0, 0);
return ret;
}
@ -163,6 +204,7 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int irq;
int i;
pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
@ -172,6 +214,9 @@ static void dpu_mdss_destroy(struct drm_device *dev)
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
for (i = 0; i < dpu_mdss->num_paths; i++)
icc_put(dpu_mdss->path[i]);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
@ -211,6 +256,10 @@ int dpu_mdss_init(struct drm_device *dev)
}
dpu_mdss->mmio_len = resource_size(res);
ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
if (ret)
return ret;
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
@ -232,14 +281,16 @@ int dpu_mdss_init(struct drm_device *dev)
irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
dpu_mdss);
priv->mdss = &dpu_mdss->base;
pm_runtime_enable(dev->dev);
dpu_mdss_icc_request_bw(priv->mdss);
pm_runtime_get_sync(dev->dev);
dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
pm_runtime_put_sync(dev->dev);
priv->mdss = &dpu_mdss->base;
return ret;
irq_error:

View File

@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_atomic_uapi.h>
#include "msm_drv.h"
@ -1324,9 +1325,6 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
debugfs_create_dir(pdpu->pipe_name,
plane->dev->primary->debugfs_root);
if (!pdpu->debugfs_root)
return -ENOMEM;
/* don't error check these */
debugfs_create_x32("features", 0600,
pdpu->debugfs_root, &pdpu->features);
@ -1535,6 +1533,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
drm_plane_enable_fb_damage_clips(plane);
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);

View File

@ -138,16 +138,12 @@ TRACE_EVENT(dpu_trace_counter,
)
TRACE_EVENT(dpu_perf_crtc_update,
TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
u64 bw_ctl_ebi, u32 core_clk_rate,
bool stop_req, u32 update_bus, u32 update_clk),
TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
stop_req, update_bus, update_clk),
TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
bool stop_req, bool update_bus, bool update_clk),
TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk),
TP_STRUCT__entry(
__field(u32, crtc)
__field(u64, bw_ctl_mnoc)
__field(u64, bw_ctl_llcc)
__field(u64, bw_ctl_ebi)
__field(u64, bw_ctl)
__field(u32, core_clk_rate)
__field(bool, stop_req)
__field(u32, update_bus)
@ -155,20 +151,16 @@ TRACE_EVENT(dpu_perf_crtc_update,
),
TP_fast_assign(
__entry->crtc = crtc;
__entry->bw_ctl_mnoc = bw_ctl_mnoc;
__entry->bw_ctl_llcc = bw_ctl_llcc;
__entry->bw_ctl_ebi = bw_ctl_ebi;
__entry->bw_ctl = bw_ctl;
__entry->core_clk_rate = core_clk_rate;
__entry->stop_req = stop_req;
__entry->update_bus = update_bus;
__entry->update_clk = update_clk;
),
TP_printk(
"crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
"crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
__entry->crtc,
__entry->bw_ctl_mnoc,
__entry->bw_ctl_llcc,
__entry->bw_ctl_ebi,
__entry->bw_ctl,
__entry->core_clk_rate,
__entry->stop_req,
__entry->update_bus,

View File

@ -310,8 +310,6 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
int i, j;
entry = debugfs_create_dir("vbif", debugfs_root);
if (IS_ERR_OR_NULL(entry))
return;
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
@ -319,8 +317,6 @@ void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
debugfs_vbif = debugfs_create_dir(vbif_name, entry);
if (IS_ERR_OR_NULL(debugfs_vbif))
continue;
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);

View File

@ -15,6 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_damage_helper.h>
#include "mdp4_kms.h"
#define DOWN_SCALE_MAX 8
@ -391,6 +392,8 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane_install_properties(plane, &plane->base);
drm_plane_enable_fb_damage_clips(plane);
return plane;
fail:

View File

@ -713,7 +713,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
return NULL;
return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)

View File

@ -16,6 +16,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/interconnect.h>
#include <linux/of_irq.h>
#include "msm_drv.h"
@ -1048,9 +1049,46 @@ static const struct component_ops mdp5_ops = {
.unbind = mdp5_unbind,
};
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
if (!path0) {
/* no interconnect support is not necessarily a fatal
* condition, the platform may simply not have an
* interconnect driver yet. But warn about it in case
* bootloader didn't setup bus clocks high enough for
* scanout.
*/
dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
return 0;
}
icc_set_bw(path0, 0, MBps_to_icc(6400));
if (!IS_ERR_OR_NULL(path1))
icc_set_bw(path1, 0, MBps_to_icc(6400));
if (!IS_ERR_OR_NULL(path_rot))
icc_set_bw(path_rot, 0, MBps_to_icc(6400));
return 0;
}
static int mdp5_dev_probe(struct platform_device *pdev)
{
int ret;
DBG("");
ret = mdp5_setup_interconnect(pdev);
if (ret)
return ret;
return component_add(&pdev->dev, &mdp5_ops);
}

View File

@ -16,6 +16,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_damage_helper.h>
#include <drm/drm_print.h>
#include "mdp5_kms.h"
@ -1099,6 +1100,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane_install_properties(plane, &plane->base);
drm_plane_enable_fb_damage_clips(plane);
return plane;
fail:

View File

@ -242,6 +242,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
msm_dsi_manager_setup_encoder(msm_dsi->id);
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector;

View File

@ -71,7 +71,6 @@ struct msm_dsi {
*/
struct drm_panel *panel;
struct drm_bridge *external_bridge;
unsigned long device_flags;
struct device *phy_dev;
bool phy_enabled;
@ -89,7 +88,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id);
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
void msm_dsi_manager_setup_encoder(int id);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
bool msm_dsi_manager_validate_current_config(u8 id);
@ -161,8 +160,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
unsigned long *panel_flags);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);

View File

@ -110,6 +110,25 @@ static const struct msm_dsi_config msm8996_dsi_cfg = {
.num_dsi = 2,
};
static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
.num = 2,
.regs = {
{"vdd", 367000, 16 }, /* 0.9 V */
{"vdda", 62800, 2 }, /* 1.2 V */
},
},
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
.num_dsi = 2,
};
static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
@ -178,6 +197,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};

View File

@ -17,6 +17,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_V2_VER_MINOR_8064 0x0

View File

@ -1041,7 +1041,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
ret = wait_for_completion_timeout(&msm_host->video_comp,
msecs_to_jiffies(70));
if (ret <= 0)
if (ret == 0)
DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
@ -1590,8 +1590,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
@ -2434,17 +2432,14 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return 0;
}
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
unsigned long *panel_flags)
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
struct drm_panel *panel;
return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
}
panel = of_drm_find_panel(msm_host->device_node);
if (panel_flags)
*panel_flags = msm_host->mode_flags;
return panel;
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)

View File

@ -225,63 +225,79 @@ static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
return dsi_bridge->id;
}
static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
void msm_dsi_manager_setup_encoder(int id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_drm_private *priv = msm_dsi->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
if (encoder && kms->funcs->set_encoder_mode)
kms->funcs->set_encoder_mode(kms, encoder,
dsi_mgr_is_cmd_mode(msm_dsi));
}
static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
{
struct msm_drm_private *priv = conn->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_dsi *master_dsi, *slave_dsi;
struct drm_panel *panel;
if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
slave_dsi = msm_dsi;
} else {
master_dsi = msm_dsi;
slave_dsi = other_dsi;
}
/*
* There is only 1 panel in the global panel list for dual DSI mode.
* Therefore slave dsi should get the drm_panel instance from master
* dsi.
*/
panel = msm_dsi_host_get_panel(master_dsi->host);
if (IS_ERR(panel)) {
DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
PTR_ERR(panel));
return PTR_ERR(panel);
}
if (!panel || !IS_DUAL_DSI())
goto out;
drm_object_attach_property(&conn->base,
conn->dev->mode_config.tile_property, 0);
/*
* Set split display info to kms once dual DSI panel is connected to
* both hosts.
*/
if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
dsi_mgr_is_cmd_mode(msm_dsi));
}
out:
msm_dsi->panel = panel;
return 0;
}
static enum drm_connector_status dsi_mgr_connector_detect(
struct drm_connector *connector, bool force)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
struct msm_drm_private *priv = connector->dev->dev_private;
struct msm_kms *kms = priv->kms;
DBG("id=%d", id);
if (!msm_dsi->panel) {
msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
&msm_dsi->device_flags);
/* There is only 1 panel in the global panel list
* for dual DSI mode. Therefore slave dsi should get
* the drm_panel instance from master dsi, and
* keep using the panel flags got from the current DSI link.
*/
if (!msm_dsi->panel && IS_DUAL_DSI() &&
!IS_MASTER_DSI_LINK(id) && other_dsi)
msm_dsi->panel = msm_dsi_host_get_panel(
other_dsi->host, NULL);
if (msm_dsi->panel && kms->funcs->set_encoder_mode) {
bool cmd_mode = !(msm_dsi->device_flags &
MIPI_DSI_MODE_VIDEO);
struct drm_encoder *encoder =
msm_dsi_get_encoder(msm_dsi);
kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
if (msm_dsi->panel && IS_DUAL_DSI())
drm_object_attach_property(&connector->base,
connector->dev->mode_config.tile_property, 0);
/* Set split display info to kms once dual DSI panel is
* connected to both hosts.
*/
if (msm_dsi->panel && IS_DUAL_DSI() &&
other_dsi && other_dsi->panel) {
bool cmd_mode = !(msm_dsi->device_flags &
MIPI_DSI_MODE_VIDEO);
struct drm_encoder *encoder = msm_dsi_get_encoder(
dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
struct drm_encoder *slave_enc = msm_dsi_get_encoder(
dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
if (kms->funcs->set_split_display)
kms->funcs->set_split_display(kms, encoder,
slave_enc, cmd_mode);
else
pr_err("mdp does not support dual DSI\n");
}
}
return msm_dsi->panel ? connector_status_connected :
connector_status_disconnected;
@ -595,7 +611,17 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
drm_connector_attach_encoder(connector, msm_dsi->encoder);
ret = msm_dsi_manager_panel_init(connector, id);
if (ret) {
DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
goto fail;
}
return connector;
fail:
connector->funcs->destroy(msm_dsi->connector);
return ERR_PTR(ret);
}
bool msm_dsi_manager_validate_current_config(u8 id)
@ -751,35 +777,6 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
return true;
}
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
* path. If mipi_dsi_attach() happens in DSI driver's probe path
* (generally the case when we're connected to a drm_panel of the type
* mipi_dsi_device), this would be NULL. In such cases, try to set the
* encoder mode in the DSI connector's detect() op.
*/
if (!dev)
return;
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
cmd_mode = !(device_flags &
MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
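
The helper is removed outright: encoder mode is now configured through msm_dsi_manager_setup_encoder() above, so the NULL-dev escape hatch described in the comment is no longer needed.
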
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;


@ -499,6 +499,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
.data = &dsi_phy_10nm_cfgs },
{ .compatible = "qcom,dsi-phy-10nm-8998",
.data = &dsi_phy_10nm_8998_cfgs },
#endif
{}
};
@ -608,10 +610,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
if (IS_ERR_OR_NULL(phy->pll)) {
DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
phy->pll = NULL;
}
dsi_phy_disable_resource(phy);


@ -13,6 +13,9 @@
#define dsi_phy_read(offset) msm_readl((offset))
#define dsi_phy_write(offset, data) msm_writel((data), (offset))
/* v3.0.0 10nm implementation that requires the old timing settings */
#define V3_0_0_10NM_OLD_TIMINGS_QUIRK BIT(0)
struct msm_dsi_phy_ops {
int (*init) (struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
@ -33,6 +36,7 @@ struct msm_dsi_phy_cfg {
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
const resource_size_t io_start[DSI_MAX];
const int num_dsi_phy;
const int quirks;
};
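
The new quirks field follows the usual bitmask pattern: each hardware deviation claims a BIT(n), a per-SoC cfg table ORs in whatever applies, and shared code branches at each divergence point. A minimal sketch of the pattern (SOME_FUTURE_QUIRK is hypothetical; only the old-timings quirk exists in this series):

#define V3_0_0_10NM_OLD_TIMINGS_QUIRK	BIT(0)
#define SOME_FUTURE_QUIRK		BIT(1)	/* hypothetical */

/* a per-SoC cfg table opts in: */
.quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,

/* and shared code tests the bit where behaviour differs: */
if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
	tx_dctrl[3] = 0x02;
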
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
@ -41,6 +45,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;


@ -42,6 +42,9 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
void __iomem *lane_base = phy->lane_base;
if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
tx_dctrl[3] = 0x02;
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
@ -74,9 +77,11 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
tx_dctrl[i]);
}
/* Toggle BIT 0 to release freeze I/O */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) {
/* Toggle BIT 0 to release freeze I/O */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
}
}
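
Both divergences in this function key off the same quirk bit: the tx_dctrl[3] strength value changes, and the freeze-I/O toggle at the end is skipped, leaving the newer sequence as the default for the existing 10nm users.
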
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
@ -221,3 +226,22 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.type = MSM_DSI_PHY_10NM,
.src_pll_truthtable = { {false, false}, {true, false} },
.reg_cfg = {
.num = 1,
.regs = {
{"vdds", 36000, 32},
},
},
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
.init = dsi_10nm_phy_init,
},
.io_start = { 0xc994400, 0xc996400 },
.num_dsi_phy = 2,
.quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,
};
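
The msm8998 variant reuses the 10nm ops table wholesale; the visible differences from dsi_phy_10nm_cfgs above are the io_start addresses and the quirks flag.
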


@ -104,8 +104,13 @@ struct dsi_pll_10nm {
struct dsi_pll_regs reg_setup;
/* private clocks: */
struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
u32 num_hws;
struct clk_hw *out_div_clk_hw;
struct clk_hw *bit_clk_hw;
struct clk_hw *byte_clk_hw;
struct clk_hw *by_2_bit_clk_hw;
struct clk_hw *post_out_div_clk_hw;
struct clk_hw *pclk_mux_hw;
struct clk_hw *out_dsiclk_hw;
/* clock-provider: */
struct clk_hw_onecell_data *hw_data;
@ -617,8 +622,19 @@ static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
struct device *dev = &pll_10nm->pdev->dev;
DBG("DSI PLL%d", pll_10nm->id);
of_clk_del_provider(dev->of_node);
clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
clk_hw_unregister(&pll_10nm->base.clk_hw);
}
/*
@ -639,10 +655,8 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->pdev->dev;
struct clk_hw **hws = pll_10nm->hws;
struct clk_hw_onecell_data *hw_data;
struct clk_hw *hw;
int num = 0;
int ret;
DBG("DSI%d", pll_10nm->id);
@ -660,8 +674,6 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
if (ret)
return ret;
hws[num++] = &pll_10nm->base.clk_hw;
snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
@ -670,10 +682,12 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
pll_10nm->mmio +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_base_clk_hw;
}
hws[num++] = hw;
pll_10nm->out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
@ -685,10 +699,12 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
REG_DSI_10nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED,
&pll_10nm->postdiv_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_out_div_clk_hw;
}
hws[num++] = hw;
pll_10nm->bit_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
@ -696,10 +712,12 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_bit_clk_hw;
}
hws[num++] = hw;
pll_10nm->byte_clk_hw = hw;
hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
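
To put illustrative numbers on that formula (not taken from this patch): a 1.5 GHz VCO with OUT_DIV = 1 and BIT_DIV = 1 yields a 1.5 GHz bit clock and a 1500 MHz / 8 = 187.5 MHz byte clock. Only the fixed /8 lives in this clk_hw_register_fixed_factor() call; OUT_DIV and BIT_DIV are the two register-backed dividers registered just above.
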
snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
@ -707,20 +725,24 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_byte_clk_hw;
}
hws[num++] = hw;
pll_10nm->by_2_bit_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_by_2_bit_clk_hw;
}
hws[num++] = hw;
pll_10nm->post_out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
@ -734,10 +756,12 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
}, 4, 0, pll_10nm->phy_cmn_mmio +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_post_out_div_clk_hw;
}
hws[num++] = hw;
pll_10nm->pclk_mux_hw = hw;
snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
@ -748,14 +772,14 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
REG_DSI_10nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED,
&pll_10nm->postdiv_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto err_pclk_mux_hw;
}
hws[num++] = hw;
pll_10nm->out_dsiclk_hw = hw;
hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
pll_10nm->num_hws = num;
hw_data->num = NUM_PROVIDED_CLKS;
pll_10nm->hw_data = hw_data;
@ -763,10 +787,29 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
pll_10nm->hw_data);
if (ret) {
DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
goto err_dsiclk_hw;
}
return 0;
err_dsiclk_hw:
clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
err_pclk_mux_hw:
clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
err_post_out_div_clk_hw:
clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
err_by_2_bit_clk_hw:
clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
err_byte_clk_hw:
clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
err_bit_clk_hw:
clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
err_out_div_clk_hw:
clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
err_base_clk_hw:
clk_hw_unregister(&pll_10nm->base.clk_hw);
return ret;
}
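
Taken together, the error labels mirror the registration order exactly in reverse, so a failure at any step releases only what is already live, and dsi_pll_10nm_destroy() performs the same teardown (plus of_clk_del_provider()) on the success path. The named clk_hw pointers that replace the old hws[num++] array are what make this per-step unwinding possible.
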
struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
@ -775,9 +818,6 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
struct msm_dsi_pll *pll;
int ret;
if (!pdev)
return ERR_PTR(-ENODEV);
pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
if (!pll_10nm)
return ERR_PTR(-ENOMEM);


@ -259,13 +259,24 @@ static int msm_drm_uninit(struct device *dev)
struct msm_mdss *mdss = priv->mdss;
int i;
/*
* Shutdown the hw if we're far enough along that things might be on.
* If we run this too early, we'll end up panicking in any variety of
* places. Since we don't register the drm device until late in
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (ddev->registered) {
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
}
/* We must cancel and clean up any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
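
The ordering is deliberate: unregister first so no new userspace work arrives, shut down atomic state, then flush and destroy the vblank workqueue before the IRQ teardown later in the function; the destroy_workqueue() call correspondingly disappears from the tail (removed a few hunks below).
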
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
@ -279,8 +290,6 @@ static int msm_drm_uninit(struct device *dev)
drm_kms_helper_poll_fini(ddev);
drm_dev_unregister(ddev);
msm_perf_debugfs_cleanup(priv);
msm_rd_debugfs_cleanup(priv);
@ -288,7 +297,7 @@ static int msm_drm_uninit(struct device *dev)
if (fbdev && priv->fbdev)
msm_fbdev_free(ddev);
#endif
drm_atomic_helper_shutdown(ddev);
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
@ -313,6 +322,7 @@ static int msm_drm_uninit(struct device *dev)
ddev->dev_private = NULL;
drm_dev_put(ddev);
destroy_workqueue(priv->wq);
kfree(priv);
return 0;
@ -611,6 +621,7 @@ static void load_gpu(struct drm_device *dev)
static int context_init(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@ -619,6 +630,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
msm_submitqueue_init(dev, ctx);
ctx->aspace = priv->gpu->aspace;
file->driver_priv = ctx;
return 0;
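
Each drm_file's private context now snapshots an address-space pointer at open time. Today it always points at the GPU's global aspace, but the consumers below (submit creation, pinning, retire) all reach it through ctx/submit rather than gpu->aspace, removing the hard-coded assumption that every context shares one address space.
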
@ -1317,16 +1329,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
return ret;
goto fail;
/* on all devices that I am aware of, IOMMUs that can map
* any address the CPU can see are used:
*/
ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
if (ret)
return ret;
goto fail;
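
(Here ~0 promotes to an all-ones u64, i.e. a request for the widest DMA mask available, matching the comment's claim that the IOMMU can map anything the CPU can address.)
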
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
if (ret)
goto fail;
return 0;
fail:
of_platform_depopulate(&pdev->dev);
return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)


@ -68,6 +68,7 @@ struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
int queueid;
struct msm_gem_address_space *aspace;
};
enum msm_mdp_plane_property {


@ -16,6 +16,7 @@
*/
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@ -35,6 +36,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
#ifdef CONFIG_DEBUG_FS


@ -352,8 +352,10 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
msm_gem_purge_vma(vma->aspace, vma);
msm_gem_close_vma(vma->aspace, vma);
if (vma->aspace) {
msm_gem_purge_vma(vma->aspace, vma);
msm_gem_close_vma(vma->aspace, vma);
}
del_vma(vma);
}
}


@ -141,6 +141,7 @@ void msm_gem_free_work(struct work_struct *work);
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
struct msm_gem_address_space *aspace;
struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;


@ -32,8 +32,9 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
uint32_t nr_bos, uint32_t nr_cmds)
struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
@ -47,6 +48,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return NULL;
submit->dev = dev;
submit->aspace = aspace;
submit->gpu = gpu;
submit->fence = NULL;
submit->cmd = (void *)&submit->bos[nr_bos];
@ -160,7 +162,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->base.resv->lock);
@ -264,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
/* if locking succeeded, pin bo: */
ret = msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
submit->aspace, &iova);
if (ret)
break;
@ -477,7 +479,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;


@ -684,7 +684,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
drm_gem_object_put(&msm_obj->base);
}
@ -768,8 +768,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_get(&msm_obj->base);
msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
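
End to end, the submit now carries its own aspace from submit_create() through submit_pin_objects() to the unpin at retire time, so all three sites stay consistent even if a submit's address space ever differs from gpu->aspace.
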


@ -30,7 +30,7 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
struct msm_iommu *iommu = arg;
if (iommu->base.handler)
return iommu->base.handler(iommu->base.arg, iova, flags);
pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
return 0;
}


@ -205,7 +205,6 @@ int msm_perf_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_perf_state *perf;
struct dentry *ent;
/* only create on first minor: */
if (priv->perf)
@ -220,19 +219,9 @@ int msm_perf_debugfs_init(struct drm_minor *minor)
mutex_init(&perf->read_lock);
priv->perf = perf;
ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
minor->debugfs_root, perf, &perf_debugfs_fops);
if (!ent) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n",
minor->debugfs_root);
goto fail;
}
debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root,
perf, &perf_debugfs_fops);
return 0;
fail:
msm_perf_debugfs_cleanup(priv);
return -1;
}
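
The removed error handling reflects current debugfs convention: debugfs_create_file() is best-effort, its return value is not meant to be checked, and a debugfs failure should never fail driver init. rd_init() below gets the identical treatment.
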
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)


@ -244,8 +244,6 @@ static void rd_cleanup(struct msm_rd_state *rd)
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
struct msm_rd_state *rd;
struct dentry *ent;
int ret = 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
@ -258,20 +256,10 @@ static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
init_waitqueue_head(&rd->fifo_event);
ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops);
if (!ent) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
minor->debugfs_root, name);
ret = -ENOMEM;
goto fail;
}
debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
&rd_debugfs_fops);
return rd;
fail:
rd_cleanup(rd);
return ERR_PTR(ret);
}
int msm_rd_debugfs_init(struct drm_minor *minor)