drm msm next for 5.8-rc1
* new gpu support: a405, a640, a650
* dpu: color processing support
* mdp5: support for msm8x36 (the thing with a405)
* some prep work for per-context pagetables (ie the part that does not
  depend on in-flight iommu patches)
* last but not least, UABI update for submit ioctl to support syncobj
  (from Bas)
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJe3bDxAAoJEAx081l5xIa+tkoQAIGUxvEgYBQ+S6RvANZAT+Wq
2JZS2JPvExcB3Xe4erI+y7DeIuK2VghQUAcxMWhrDGgU7jKLV7jq09HTKkdE7++4
feLQMZziy3rAN3H6Pe1+72ZI9jAeK7JpvyRxI1nSu1O1JnaZS2rHmCOnBT8yA8sw
tHld1b5KUMmgTLR6CcJQYz0qp7p8x5LE8MdWY57Px5AqcnXFf1z/oiYNiCcxK2Jl
tEic1b9mvCwvlGWYdu00aavqo7WESj3oWYxtb8MsmVBWjAHtTqrlBY21DyQzgdEu
sgc8QAG+zHJ7Ls81INSVfDQ1zrspn/n+yL8efMhQibpMAQqGgt17nF+ZIx50nLMi
USg5qBJKgBL2iccooA9QEioFE3tB6Ld8SfcjLGIU7jegi0Fw/KpVPqmUVjKdqrXT
qjUKExa4e4pFxOlgbOYc1lIzSLwpGjGpLWbRWj8aee1GyrWRJA0Y9aRo75G6Sr4e
SX6807kX+h0IrF1rJzftVKa+KviD9SD4NyAyah6OJvg0FVJnhbO75PmnAkB6GVnQ
Jgg7fALjjkANRd8764H2B0pjke6wPDnUNXnh32ei2FWxVfQfIu/qhlJg9cU7TdMf
Z2kcHijoRGjAfvddD+oDs3DS58b9o7DHKgsZuLWvh87MpVbv9CynZSh5SgGqqNKR
nHajwsRXQc6e/fXT4YzN
=hIK6
-----END PGP SIGNATURE-----

Merge tag 'drm-next-msm-5.8-2020-06-08' of git://anongit.freedesktop.org/drm/drm

Pull drm msm updates from Dave Airlie:
 "This tree has been in next for a couple of weeks, but Rob missed an
  arm32 build issue, so I was awaiting the tree with a patch reverted.

  - new gpu support: a405, a640, a650
  - dpu: color processing support
  - mdp5: support for msm8x36 (the thing with a405)
  - some prep work for per-context pagetables (ie the part that does
    not depend on in-flight iommu patches)
  - last but not least, UABI update for submit ioctl to support syncobj
    (from Bas)"

* tag 'drm-next-msm-5.8-2020-06-08' of git://anongit.freedesktop.org/drm/drm: (30 commits)
  Revert "drm/msm/dpu: add support for clk and bw scaling for display"
  drm/msm/a6xx: skip HFI set freq if GMU is powered down
  drm/msm: Update the MMU helper function APIs
  drm/msm: Refactor address space initialization
  drm/msm: Attach the IOMMU device during initialization
  drm/msm/dpu: dpu_setup_dspp_pcc() can be static
  drm/msm/a6xx: a6xx_hfi_send_start() can be static
  drm/msm/a4xx: add a405_registers for a405 device
  drm/msm/a4xx: add adreno a405 support
  drm/msm/a6xx: update a6xx_hw_init for A640 and A650
  drm/msm/a6xx: enable GMU log
  drm/msm/a6xx: update pdc/rscc GMU registers for A640/A650
  drm/msm/a6xx: A640/A650 GMU firmware path
  drm/msm/a6xx: HFI v2 for A640 and A650
  drm/msm/a6xx: add A640/A650 to gpulist
  drm/msm/a6xx: use msm_gem for GMU memory objects
  drm/msm: add internal MSM_BO_MAP_PRIV flag
  drm/msm: add msm_gem_get_and_pin_iova_range
  drm/msm: Check for powered down HW in the devfreq callbacks
  drm/msm/dpu: update bandwidth threshold check
  ...
commit 9413b9a690
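Of the items above, the syncobj submit UABI is the one new userspace contract in this pull. Below is a minimal sketch of how a userspace driver might wire it up; it assumes the struct and field names from include/uapi/drm/msm_drm.h as of this merge (in_syncobjs/out_syncobjs arrays plus the MSM_SUBMIT_SYNCOBJ_IN/OUT flags) and should be verified against the actual header rather than treated as authoritative:

/*
 * Hedged sketch, not part of this diff: submit a job that waits on one
 * syncobj and signals another via DRM_IOCTL_MSM_GEM_SUBMIT. The syncobj
 * handles are assumed to have been created with DRM_IOCTL_SYNCOBJ_CREATE.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

int submit_with_syncobjs(int fd, uint32_t queueid,
			 struct drm_msm_gem_submit_bo *bos, uint32_t nr_bos,
			 struct drm_msm_gem_submit_cmd *cmds, uint32_t nr_cmds,
			 uint32_t wait_handle, uint32_t signal_handle)
{
	/* one syncobj to wait on before execution, one to signal after */
	struct drm_msm_gem_submit_syncobj in = { .handle = wait_handle };
	struct drm_msm_gem_submit_syncobj out = { .handle = signal_handle };

	struct drm_msm_gem_submit req = {
		.flags = MSM_PIPE_3D0 | MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT,
		.queueid = queueid,
		.bos = (uintptr_t)bos,
		.nr_bos = nr_bos,
		.cmds = (uintptr_t)cmds,
		.nr_cmds = nr_cmds,
		.in_syncobjs = (uintptr_t)&in,
		.nr_in_syncobjs = 1,
		.out_syncobjs = (uintptr_t)&out,
		.nr_out_syncobjs = 1,
		/* stride lets the kernel skip over larger future structs */
		.syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
	};

	return drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
}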
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
@@ -65,6 +65,7 @@ msm-y := \
 	disp/dpu1/dpu_hw_lm.o \
 	disp/dpu1/dpu_hw_pingpong.o \
 	disp/dpu1/dpu_hw_sspp.o \
+	disp/dpu1/dpu_hw_dspp.o \
 	disp/dpu1/dpu_hw_top.o \
 	disp/dpu1/dpu_hw_util.o \
 	disp/dpu1/dpu_hw_vbif.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
 	return state;
 }
 
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+	struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+	struct msm_gem_address_space *aspace;
+
+	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+		SZ_16M + 0xfff * SZ_64K);
+
+	if (IS_ERR(aspace) && !IS_ERR(mmu))
+		mmu->funcs->destroy(mmu);
+
+	return aspace;
+}
+
 /* Register offset defines for A2XX - copy of A3XX */
 static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
 		.gpu_state_get = a2xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
+		.create_address_space = a2xx_create_address_space,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
 		.gpu_state_get = a3xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
+		.create_address_space = adreno_iommu_create_address_space,
 	},
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -66,19 +66,22 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
 		}
 	}
 
-	for (i = 0; i < 4; i++) {
-		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
-				0x00000922);
-	}
+	/* No CCU for A405 */
+	if (!adreno_is_a405(adreno_gpu)) {
+		for (i = 0; i < 4; i++) {
+			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+					0x00000922);
+		}
 
 	for (i = 0; i < 4; i++) {
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
 				0x00000000);
 	}
 
 	for (i = 0; i < 4; i++) {
 		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
 				0x00000001);
+		}
 	}
 
 	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
@@ -137,7 +140,9 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 	uint32_t *ptr, len;
 	int i, ret;
 
-	if (adreno_is_a420(adreno_gpu)) {
+	if (adreno_is_a405(adreno_gpu)) {
+		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+	} else if (adreno_is_a420(adreno_gpu)) {
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
 		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
 		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -440,6 +445,52 @@ static const unsigned int a4xx_registers[] = {
 	~0 /* sentinel */
 };
 
+static const unsigned int a405_registers[] = {
+	/* RBBM */
+	0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+	0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+	0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+	/* CP */
+	0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+	0x0578, 0x058F,
+	/* VSC */
+	0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+	/* GRAS */
+	0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+	/* RB */
+	0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+	/* PC */
+	0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+	/* VFD */
+	0x0E40, 0x0E4A,
+	/* VPC */
+	0x0E60, 0x0E61, 0x0E63, 0x0E68,
+	/* UCHE */
+	0x0E80, 0x0E84, 0x0E88, 0x0E95,
+	/* GRAS CTX 0 */
+	0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+	/* PC CTX 0 */
+	0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+	/* VFD CTX 0 */
+	0x2200, 0x2204, 0x2208, 0x22A9,
+	/* GRAS CTX 1 */
+	0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+	/* PC CTX 1 */
+	0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+	/* VFD CTX 1 */
+	0x2600, 0x2604, 0x2608, 0x26A9,
+	/* VBIF version 0x20050000*/
+	0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+	0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+	0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+	0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+	0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+	0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+	0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+	0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+	~0 /* sentinel */
+};
+
 static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
 {
 	struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -532,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = {
 #endif
 		.gpu_state_get = a4xx_gpu_state_get,
 		.gpu_state_put = adreno_gpu_state_put,
+		.create_address_space = adreno_iommu_create_address_space,
 	},
 	.get_timestamp = a4xx_get_timestamp,
 };
@@ -563,13 +615,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 	gpu->perfcntrs = NULL;
 	gpu->num_perfcntrs = 0;
 
-	adreno_gpu->registers = a4xx_registers;
-	adreno_gpu->reg_offsets = a4xx_register_offsets;
-
 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
 	if (ret)
 		goto fail;
 
+	adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+							     a4xx_registers;
+	adreno_gpu->reg_offsets = a4xx_register_offsets;
+
 	/* if needed, allocate gmem: */
 	if (adreno_is_a4xx(adreno_gpu)) {
 		ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1404,6 +1404,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
 {
 	u64 busy_cycles, busy_time;
 
+	/* Only read the gpu busy if the hardware is already active */
+	if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
+		return 0;
+
 	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
 			REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
 
@@ -1412,6 +1416,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
 
 	gpu->devfreq.busy_cycles = busy_cycles;
 
+	pm_runtime_put(&gpu->pdev->dev);
+
 	if (WARN_ON(busy_time > ~0LU))
 		return ~0LU;
 
@@ -1439,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = {
 		.gpu_busy = a5xx_gpu_busy,
 		.gpu_state_get = a5xx_gpu_state_get,
 		.gpu_state_put = a5xx_gpu_state_put,
+		.create_address_space = adreno_iommu_create_address_space,
 	},
 	.get_timestamp = a5xx_get_timestamp,
 };
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1047,6 +1047,8 @@ enum a6xx_tex_type {
 
 #define REG_A6XX_CP_MISC_CNTL				0x00000840
 
+#define REG_A6XX_CP_APRIV_CNTL				0x00000844
+
 #define REG_A6XX_CP_ROQ_THRESHOLDS_1			0x000008c1
 
 #define REG_A6XX_CP_ROQ_THRESHOLDS_2			0x000008c2
@@ -1764,6 +1766,8 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
 
 #define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL		0x00000010
 
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL		0x00000011
+
 #define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL		0x0000001f
 
 #define REG_A6XX_RBBM_INT_CLEAR_CMD			0x00000037
@@ -2418,6 +2422,16 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
 
 #define REG_A6XX_TPL1_NC_MODE_CNTL			0x0000b604
 
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0		0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1		0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2		0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3		0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4		0x0000b60c
+
 #define REG_A6XX_TPL1_PERFCTR_TP_SEL_0			0x0000b610
 
 #define REG_A6XX_TPL1_PERFCTR_TP_SEL_1			0x0000b611
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,14 +2,16 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
 #include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
 
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
 
 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
 {
@@ -127,8 +129,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
 	if (ret)
 		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
 
-	gmu->freq = gmu->gpu_freqs[index];
-
 	/*
 	 * Eventually we will want to scale the path vote with the frequency but
 	 * for now leave it at max so that the performance is nominal.
@@ -151,8 +151,21 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
 			break;
 
 	gmu->current_perf_index = perf_index;
+	gmu->freq = gmu->gpu_freqs[perf_index];
+
+	/*
+	 * This can get called from devfreq while the hardware is idle. Don't
+	 * bring up the power if it isn't already active
+	 */
+	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+		return;
 
-	__a6xx_gmu_set_freq(gmu, perf_index);
+	if (gmu->legacy)
+		__a6xx_gmu_set_freq(gmu, perf_index);
+	else
+		a6xx_hfi_set_freq(gmu, perf_index);
+
+	pm_runtime_put(gmu->dev);
 }
 
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -196,6 +209,12 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 	u32 val;
 
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+	/* Set the log wptr index
+	 * note: downstream saves the value in poweroff and restores it here
+	 */
+	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
 
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
@@ -232,8 +251,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 
 	switch (state) {
 	case GMU_OOB_GPU_SET:
-		request = GMU_OOB_GPU_SET_REQUEST;
-		ack = GMU_OOB_GPU_SET_ACK;
+		if (gmu->legacy) {
+			request = GMU_OOB_GPU_SET_REQUEST;
+			ack = GMU_OOB_GPU_SET_ACK;
+		} else {
+			request = GMU_OOB_GPU_SET_REQUEST_NEW;
+			ack = GMU_OOB_GPU_SET_ACK_NEW;
+		}
 		name = "GPU_SET";
 		break;
 	case GMU_OOB_BOOT_SLUMBER:
@@ -272,6 +296,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 /* Clear a pending OOB state in the GMU */
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	if (!gmu->legacy) {
+		WARN_ON(state != GMU_OOB_GPU_SET);
+		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+			1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+		return;
+	}
+
 	switch (state) {
 	case GMU_OOB_GPU_SET:
 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
@@ -294,6 +325,9 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
 	int ret;
 	u32 val;
 
+	if (!gmu->legacy)
+		return 0;
+
 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
 
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
@@ -313,6 +347,9 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
 	u32 val;
 	int ret;
 
+	if (!gmu->legacy)
+		return;
+
 	/* Make sure retention is on */
 	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
 
@@ -356,6 +393,11 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
 		a6xx_sptprac_disable(gmu);
 
+	if (!gmu->legacy) {
+		ret = a6xx_hfi_send_prep_slumber(gmu);
+		goto out;
+	}
+
 	/* Tell the GMU to get ready to slumber */
 	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
 
@@ -371,6 +413,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 		}
 	}
 
+out:
 	/* Put fence into allow mode */
 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 	return ret;
@@ -392,7 +435,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 		return ret;
 	}
 
-	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
 		!val, 100, 10000);
 
 	if (ret) {
@@ -418,7 +461,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
 
-	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 		val, val & (1 << 16), 100, 10000);
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -441,32 +484,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	struct platform_device *pdev = to_platform_device(gmu->dev);
 	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
 	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+	uint32_t pdc_address_offset;
 
 	if (!pdcptr || !seqptr)
 		goto err;
 
+	if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+		pdc_address_offset = 0x30090;
+	else if (adreno_is_a650(adreno_gpu))
+		pdc_address_offset = 0x300a0;
+	else
+		pdc_address_offset = 0x30080;
+
 	/* Disable SDE clock gating */
-	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
 
 	/* Setup RSC PDC handshake for sleep and wakeup */
-	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
-	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
-	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
-	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
-	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
 	/* Load RSC sequencer uCode for sleep and wakeup */
-	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
-	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
-	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
-	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
-	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+	if (adreno_is_a650(adreno_gpu)) {
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+	} else {
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+	}
 
 	/* Load PDC sequencer uCode for power up and power down sequence */
 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
@@ -487,10 +546,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
 
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
-	if (adreno_is_a618(adreno_gpu))
-		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
-	else
-		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
 
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
@@ -502,17 +558,12 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
-	if (adreno_is_a618(adreno_gpu))
+	if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
 	else
 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
-
-
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
-	if (adreno_is_a618(adreno_gpu))
-		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
-	else
-		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
 
 	/* Setup GPU PDC */
@@ -542,6 +593,8 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 {
 	/* Disable GMU WB/RB buffer */
 	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
 
 	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
 
@@ -571,14 +624,95 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
 }
 
+struct block_header {
+	u32 addr;
+	u32 size;
+	u32 type;
+	u32 value;
+	u32 data[];
+};
+
+/* this should be a general kernel helper */
+static int in_range(u32 addr, u32 start, u32 size)
+{
+	return addr >= start && addr < start + size;
+}
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+	if (!in_range(blk->addr, bo->iova, bo->size))
+		return false;
+
+	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+	return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+	const struct block_header *blk;
+	u32 reg_offset;
+
+	u32 itcm_base = 0x00000000;
+	u32 dtcm_base = 0x00040000;
+
+	if (adreno_is_a650(adreno_gpu))
+		dtcm_base = 0x10004000;
+
+	if (gmu->legacy) {
+		/* Sanity check the size of the firmware that was loaded */
+		if (fw_image->size > 0x8000) {
+			DRM_DEV_ERROR(gmu->dev,
+				"GMU firmware is bigger than the available region\n");
+			return -EINVAL;
+		}
+
+		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+			(u32*) fw_image->data, fw_image->size);
+		return 0;
+	}
+
+
+	for (blk = (const struct block_header *) fw_image->data;
+	     (const u8*) blk < fw_image->data + fw_image->size;
+	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+		if (blk->size == 0)
+			continue;
+
+		if (in_range(blk->addr, itcm_base, SZ_16K)) {
+			reg_offset = (blk->addr - itcm_base) >> 2;
+			gmu_write_bulk(gmu,
+				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+				blk->data, blk->size);
+		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+			reg_offset = (blk->addr - dtcm_base) >> 2;
+			gmu_write_bulk(gmu,
+				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+				blk->data, blk->size);
+		} else if (!fw_block_mem(&gmu->icache, blk) &&
+			   !fw_block_mem(&gmu->dcache, blk) &&
+			   !fw_block_mem(&gmu->dummy, blk)) {
+			DRM_DEV_ERROR(gmu->dev,
+				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+				blk->addr, blk->size, blk->data[0]);
+		}
+	}
+
+	return 0;
+}
+
 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 {
 	static bool rpmh_init;
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	int i, ret;
+	int ret;
 	u32 chipid;
-	u32 *image;
+
+	if (adreno_is_a650(adreno_gpu))
+		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
 
 	if (state == GMU_WARM_BOOT) {
 		ret = a6xx_rpmh_start(gmu);
@@ -589,13 +723,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 			"GMU firmware is not loaded\n"))
 			return -ENOENT;
 
-		/* Sanity check the size of the firmware that was loaded */
-		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
-			DRM_DEV_ERROR(gmu->dev,
-				"GMU firmware is bigger than the available region\n");
-			return -EINVAL;
-		}
-
 		/* Turn on register retention */
 		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
 
@@ -609,18 +736,16 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 				return ret;
 		}
 
-		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
-
-		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
-			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
-				image[i]);
+		ret = a6xx_gmu_fw_load(gmu);
+		if (ret)
+			return ret;
 	}
 
 	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
 	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
 
 	/* Write the iova of the HFI table */
-	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 
 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -633,6 +758,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 
 	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
 
+	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+		gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
 	/* Set up the lowest idle level on the GMU */
 	a6xx_gmu_power_config(gmu);
 
@@ -640,9 +768,11 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	if (ret)
 		return ret;
 
-	ret = a6xx_gmu_gfx_rail_on(gmu);
-	if (ret)
-		return ret;
+	if (gmu->legacy) {
+		ret = a6xx_gmu_gfx_rail_on(gmu);
+		if (ret)
+			return ret;
+	}
 
 	/* Enable SPTP_PC if the CPU is responsible for it */
 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
@@ -683,13 +813,13 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 	u32 val;
 
 	/* Make sure there are no outstanding RPMh votes */
-	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
 		(val & 1), 100, 10000);
-	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
 		(val & 1), 100, 10000);
-	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
 		(val & 1), 100, 10000);
-	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
 		(val & 1), 100, 1000);
 }
 
@@ -744,6 +874,13 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
 		GMU_WARM_BOOT : GMU_COLD_BOOT;
 
+	/*
+	 * Warm boot path does not work on newer GPUs
+	 * Presumably this is because icache/dcache regions must be restored
+	 */
+	if (!gmu->legacy)
+		status = GMU_COLD_BOOT;
+
 	ret = a6xx_gmu_fw_start(gmu, status);
 	if (ret)
 		goto out;
@@ -761,7 +898,10 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	enable_irq(gmu->hfi_irq);
 
 	/* Set the GPU to the current freq */
-	__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+	if (gmu->legacy)
+		__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+	else
+		a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
 
 	/*
 	 * "enable" the GX power domain which won't actually do anything but it
@@ -919,34 +1059,75 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 	return 0;
 }
 
-static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
 {
-	if (IS_ERR_OR_NULL(bo))
-		return;
-
-	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
-	kfree(bo);
+	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+
+	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+	msm_gem_address_space_put(gmu->aspace);
 }
 
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
-		size_t size)
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+		size_t size, u64 iova)
 {
-	struct a6xx_gmu_bo *bo;
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct drm_device *dev = a6xx_gpu->base.base.dev;
+	uint32_t flags = MSM_BO_WC;
+	u64 range_start, range_end;
+	int ret;
 
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return ERR_PTR(-ENOMEM);
+	size = PAGE_ALIGN(size);
+	if (!iova) {
+		/* no fixed address - use GMU's uncached range */
+		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+		range_end = 0x80000000;
+	} else {
+		/* range for fixed address */
+		range_start = iova;
+		range_end = iova + size;
+		/* use IOMMU_PRIV for icache/dcache */
+		flags |= MSM_BO_MAP_PRIV;
+	}
 
-	bo->size = PAGE_ALIGN(size);
+	bo->obj = msm_gem_new(dev, size, flags);
+	if (IS_ERR(bo->obj))
+		return PTR_ERR(bo->obj);
 
-	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+	if (ret) {
+		drm_gem_object_put(bo->obj);
+		return ret;
+	}
 
-	if (!bo->virt) {
-		kfree(bo);
-		return ERR_PTR(-ENOMEM);
-	}
+	bo->virt = msm_gem_get_vaddr(bo->obj);
+	bo->size = size;
 
-	return bo;
+	return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+	struct iommu_domain *domain;
+	struct msm_mmu *mmu;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return -ENODEV;
+
+	mmu = msm_iommu_new(gmu->dev, domain);
+	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+	if (IS_ERR(gmu->aspace)) {
+		iommu_domain_free(domain);
+		return PTR_ERR(gmu->aspace);
+	}
+
+	return 0;
 }
 
 /* Return the 'arc-level' for the given frequency */
@@ -1011,8 +1192,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 
 	if (j == pri_count) {
 		DRM_DEV_ERROR(dev,
-			"Level %u not found in in the RPMh list\n",
+			"Level %u not found in the RPMh list\n",
 			level);
 		DRM_DEV_ERROR(dev, "Available levels:\n");
 		for (j = 0; j < pri_count; j++)
 			DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
@@ -1190,6 +1371,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 {
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	struct platform_device *pdev = to_platform_device(gmu->dev);
 
 	if (!gmu->initialized)
 		return;
@@ -1202,9 +1384,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 	}
 
 	iounmap(gmu->mmio);
+	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+		iounmap(gmu->rscc);
 	gmu->mmio = NULL;
+	gmu->rscc = NULL;
 
-	a6xx_gmu_memory_free(gmu, gmu->hfi);
+	a6xx_gmu_memory_free(gmu);
 
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
@@ -1217,6 +1402,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
 	struct platform_device *pdev = of_find_device_by_node(node);
 	int ret;
|
@ -1226,15 +1412,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
|
||||||
|
|
||||||
gmu->dev = &pdev->dev;
|
gmu->dev = &pdev->dev;
|
||||||
|
|
||||||
/* Pass force_dma false to require the DT to set the dma region */
|
of_dma_configure(gmu->dev, node, true);
|
||||||
ret = of_dma_configure(gmu->dev, node, false);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
/* Set the mask after the of_dma_configure() */
|
|
||||||
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
/* Fow now, don't do anything fancy until we get our feet under us */
|
/* Fow now, don't do anything fancy until we get our feet under us */
|
||||||
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
|
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
|
||||||
|
@@ -1246,20 +1424,64 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	/* Allocate memory for for the HFI queues */
-	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
-	if (IS_ERR(gmu->hfi))
+	ret = a6xx_gmu_memory_probe(gmu);
+	if (ret)
+		goto err_put_device;
+
+	/* Allocate memory for the GMU dummy page */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+	if (ret)
 		goto err_memory;
 
-	/* Allocate memory for the GMU debug region */
-	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
-	if (IS_ERR(gmu->debug))
+	if (adreno_is_a650(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+			SZ_16M - SZ_16K, 0x04000);
+		if (ret)
+			goto err_memory;
+	} else if (adreno_is_a640(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+			SZ_256K - SZ_16K, 0x04000);
+		if (ret)
+			goto err_memory;
+
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+			SZ_256K - SZ_16K, 0x44000);
+		if (ret)
+			goto err_memory;
+	} else {
+		/* HFI v1, has sptprac */
+		gmu->legacy = true;
+
+		/* Allocate memory for the GMU debug region */
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+		if (ret)
+			goto err_memory;
+	}
+
+	/* Allocate memory for for the HFI queues */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+	if (ret)
+		goto err_memory;
+
+	/* Allocate memory for the GMU log region */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+	if (ret)
 		goto err_memory;
 
 	/* Map the GMU registers */
 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
-	if (IS_ERR(gmu->mmio))
+	if (IS_ERR(gmu->mmio)) {
+		ret = PTR_ERR(gmu->mmio);
 		goto err_memory;
+	}
+
+	if (adreno_is_a650(adreno_gpu)) {
+		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+		if (IS_ERR(gmu->rscc))
+			goto err_mmio;
+	} else {
+		gmu->rscc = gmu->mmio + 0x23000;
+	}
 
 	/* Get the HFI and GMU interrupts */
 	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1286,13 +1508,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 err_mmio:
 	iounmap(gmu->mmio);
+	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+		iounmap(gmu->rscc);
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
-err_memory:
-	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
 	ret = -ENODEV;
 
+err_memory:
+	a6xx_gmu_memory_free(gmu);
 err_put_device:
 	/* Drop reference taken in of_find_device_by_node */
 	put_device(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -10,9 +10,10 @@
 #include "a6xx_hfi.h"
 
 struct a6xx_gmu_bo {
+	struct drm_gem_object *obj;
 	void *virt;
 	size_t size;
-	dma_addr_t iova;
+	u64 iova;
 };
 
 /*
@@ -43,7 +44,10 @@ struct a6xx_gmu_bo {
 struct a6xx_gmu {
 	struct device *dev;
 
+	struct msm_gem_address_space *aspace;
+
 	void * __iomem mmio;
+	void * __iomem rscc;
 
 	int hfi_irq;
 	int gmu_irq;
@@ -52,8 +56,12 @@ struct a6xx_gmu {
 
 	int idle_level;
 
-	struct a6xx_gmu_bo *hfi;
-	struct a6xx_gmu_bo *debug;
+	struct a6xx_gmu_bo hfi;
+	struct a6xx_gmu_bo debug;
+	struct a6xx_gmu_bo icache;
+	struct a6xx_gmu_bo dcache;
+	struct a6xx_gmu_bo dummy;
+	struct a6xx_gmu_bo log;
 
 	int nr_clocks;
 	struct clk_bulk_data *clocks;
@@ -76,6 +84,7 @@ struct a6xx_gmu {
 
 	bool initialized;
 	bool hung;
+	bool legacy; /* a618 or a630 */
 };
 
 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -88,6 +97,13 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
 	return msm_writel(value, gmu->mmio + (offset << 2));
 }
 
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+	memcpy_toio(gmu->mmio + (offset << 2), data, size);
+	wmb();
+}
+
 static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
 {
 	u32 val = gmu_read(gmu, reg);
@@ -111,6 +127,15 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
 	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
 		interval, timeout)
 
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	return msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+	readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+		interval, timeout)
+
 /*
  * These are the available OOB (out of band requests) to the GMU where "out of
  * band" means that the CPU talks to the GMU directly and not through HFI.
@@ -156,10 +181,16 @@ enum a6xx_gmu_oob_state {
 #define GMU_OOB_GPU_SET_ACK		24
 #define GMU_OOB_GPU_SET_CLEAR		24
 
+#define GMU_OOB_GPU_SET_REQUEST_NEW	30
+#define GMU_OOB_GPU_SET_ACK_NEW		31
+#define GMU_OOB_GPU_SET_CLEAR_NEW	31
+
 
 void a6xx_hfi_init(struct a6xx_gmu *gmu);
 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
 void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
 
 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -101,6 +101,10 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
 
 #define REG_A6XX_GMU_DCVS_RETURN			0x000023ff
 
+#define REG_A6XX_GMU_ICACHE_CONFIG			0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG			0x00004c01
+
 #define REG_A6XX_GMU_SYS_BUS_CONFIG			0x00004c0f
 
 #define REG_A6XX_GMU_CM3_SYSRESET			0x00005000
@@ -199,6 +203,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
 
 #define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE	0x000050ec
 
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF		0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG		0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP		0x00005101
+
 #define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x000051f0
 
 #define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL			0x00005157
@@ -330,8 +340,6 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
 
 #define REG_A6XX_GMU_AO_SPARE_CNTL			0x00009316
 
-#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0		0x00008c04
-
 #define REG_A6XX_GMU_RSCC_CONTROL_REQ			0x00009307
 
 #define REG_A6XX_GMU_RSCC_CONTROL_ACK			0x00009308
@@ -344,39 +352,41 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
 
 #define REG_A6XX_GPU_CC_GX_DOMAIN_MISC			0x00009d42
 
-#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR		0x00008c08
-
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO		0x00008c09
-
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI		0x00008c0a
-
-#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0			0x00008c0b
-
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR		0x00008c0d
-
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA		0x00008c0e
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0	0x00008c82
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0	0x00008c83
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0		0x00008c89
-
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0	0x00008c8c
-
-#define REG_A6XX_RSCC_OVERRIDE_START_ADDR		0x00008d00
-
-#define REG_A6XX_RSCC_SEQ_BUSY_DRV0			0x00008d01
-
-#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0			0x00008d80
-
-#define REG_A6XX_RSCC_TCS0_DRV0_STATUS			0x00008f46
-
-#define REG_A6XX_RSCC_TCS1_DRV0_STATUS			0x000090ae
-
-#define REG_A6XX_RSCC_TCS2_DRV0_STATUS			0x00009216
-
-#define REG_A6XX_RSCC_TCS3_DRV0_STATUS			0x0000937e
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0		0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR		0x00000008
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO		0x00000009
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI		0x0000000a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0			0x0000000b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR		0x0000000d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA		0x0000000e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0	0x00000082
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0	0x00000083
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0		0x00000089
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0	0x0000008c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR		0x00000100
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0			0x00000101
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0			0x00000180
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS			0x00000346
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS			0x000003ee
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS			0x00000496
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS			0x0000053e
 
 
 #endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -414,7 +414,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
     a6xx_set_hwcg(gpu, true);
 
     /* VBIF/GBIF start*/
-    gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+    if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+        gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+        gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+        gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+        gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+        gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+        gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+    } else {
+        gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+    }
 
     if (adreno_is_a630(adreno_gpu))
         gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
@@ -429,25 +439,35 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
     gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
     gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
 
-    /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
-    gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
-        REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+    if (!adreno_is_a650(adreno_gpu)) {
+        /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+        gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+            REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
 
-    gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
-        REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
-        0x00100000 + adreno_gpu->gmem - 1);
+        gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+            REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+            0x00100000 + adreno_gpu->gmem - 1);
+    }
 
     gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
     gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
 
-    gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+    if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+        gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+    else
+        gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
     gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
 
     /* Setting the mem pool size */
     gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
 
     /* Setting the primFifo thresholds default values */
-    gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+    if (adreno_is_a650(adreno_gpu))
+        gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
+    else if (adreno_is_a640(adreno_gpu))
+        gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
+    else
+        gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
 
     /* Set the AHB default slave response to "ERROR" */
     gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
@@ -471,6 +491,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 
     gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
 
+    /* Set weights for bicubic filtering */
+    if (adreno_is_a650(adreno_gpu)) {
+        gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+        gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+            0x3fe05ff4);
+        gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+            0x3fa0ebee);
+        gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+            0x3f5193ed);
+        gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+            0x3f0243f0);
+    }
+
     /* Protect registers from the CP */
     gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
 
@@ -508,6 +541,11 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
             A6XX_PROTECT_RDONLY(0x980, 0x4));
     gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
 
+    if (adreno_is_a650(adreno_gpu)) {
+        gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+            (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+    }
+
     /* Enable interrupts */
     gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
 
@@ -566,8 +604,10 @@ out:
      */
     a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
-    /* Take the GMU out of its special boot mode */
-    a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+    if (a6xx_gpu->gmu.legacy) {
+        /* Take the GMU out of its special boot mode */
+        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+    }
 
     return ret;
 }
@@ -810,6 +850,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
     struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
     u64 busy_cycles, busy_time;
 
+
+    /* Only read the gpu busy if the hardware is already active */
+    if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
+        return 0;
+
     busy_cycles = gmu_read64(&a6xx_gpu->gmu,
             REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
             REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
@@ -819,6 +864,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
 
     gpu->devfreq.busy_cycles = busy_cycles;
 
+    pm_runtime_put(a6xx_gpu->gmu.dev);
+
     if (WARN_ON(busy_time > ~0LU))
         return ~0LU;
 
@@ -846,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = {
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
         .gpu_state_get = a6xx_gpu_state_get,
         .gpu_state_put = a6xx_gpu_state_put,
+        .create_address_space = adreno_iommu_create_address_space,
 #endif
     },
     .get_timestamp = a6xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -17,10 +17,14 @@ static const char * const a6xx_hfi_msg_id[] = {
     HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
     HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
     HFI_MSG_ID(HFI_H2F_MSG_TEST),
+    HFI_MSG_ID(HFI_H2F_MSG_START),
+    HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+    HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+    HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
 };
 
-static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
-    u32 dwords)
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+    struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
 {
     struct a6xx_hfi_queue_header *header = queue->header;
     u32 i, hdr, index = header->read_index;
@@ -48,6 +52,9 @@ static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
         index = (index + 1) % header->size;
     }
 
+    if (!gmu->legacy)
+        index = ALIGN(index, 4) % header->size;
+
     header->read_index = index;
     return HFI_HEADER_SIZE(hdr);
 }
@@ -73,6 +80,12 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
         index = (index + 1) % header->size;
     }
 
+    /* Cookify any non used data at the end of the write buffer */
+    if (!gmu->legacy) {
+        for (; index % 4; index = (index + 1) % header->size)
+            queue->data[index] = 0xfafafafa;
+    }
+
     header->write_index = index;
     spin_unlock(&queue->lock);
 
@@ -106,7 +119,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
         struct a6xx_hfi_msg_response resp;
 
         /* Get the next packet */
-        ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+        ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
             sizeof(resp) >> 2);
 
         /* If the queue is empty our response never made it */
@@ -176,8 +189,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
 {
     struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
 
-    msg.dbg_buffer_addr = (u32) gmu->debug->iova;
-    msg.dbg_buffer_size = (u32) gmu->debug->size;
+    msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+    msg.dbg_buffer_size = (u32) gmu->debug.size;
     msg.boot_state = boot_state;
 
     return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
@@ -195,6 +208,28 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
         version, sizeof(*version));
 }
 
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+    struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+    int i;
+
+    msg.num_gpu_levels = gmu->nr_gpu_freqs;
+    msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+    for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+        msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+        msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+    }
+
+    for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+        msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+        msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+    }
+
+    return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+        NULL, 0);
+}
+
 static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 {
     struct a6xx_hfi_msg_perf_table msg = { 0 };
@@ -205,6 +240,7 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 
     for (i = 0; i < gmu->nr_gpu_freqs; i++) {
         msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+        msg.gx_votes[i].acd = 0xffffffff;
        msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
     }
 
@@ -306,7 +342,45 @@ static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
         NULL, 0);
 }
 
-int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+    struct a6xx_hfi_msg_start msg = { 0 };
+
+    return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+        NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+    struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+    return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+        sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+    struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+    msg.ack_type = 1; /* blocking */
+    msg.freq = index;
+    msg.bw = 0; /* TODO: bus scaling */
+
+    return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+        sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+    struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+    /* TODO: should freq and bw fields be non-zero ? */
+
+    return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+        sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
 {
     int ret;
 
@@ -324,7 +398,7 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
      * the GMU firmware
      */
 
-    ret = a6xx_hfi_send_perf_table(gmu);
+    ret = a6xx_hfi_send_perf_table_v1(gmu);
     if (ret)
         return ret;
 
@@ -341,6 +415,37 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
     return 0;
 }
 
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+    int ret;
+
+    if (gmu->legacy)
+        return a6xx_hfi_start_v1(gmu, boot_state);
+
+
+    ret = a6xx_hfi_send_perf_table(gmu);
+    if (ret)
+        return ret;
+
+    ret = a6xx_hfi_send_bw_table(gmu);
+    if (ret)
+        return ret;
+
+    ret = a6xx_hfi_send_core_fw_start(gmu);
+    if (ret)
+        return ret;
+
+    /*
+     * Downstream driver sends this in its "a6xx_hw_init" equivalent,
+     * but seems to be no harm in sending it here
+     */
+    ret = a6xx_hfi_send_start(gmu);
+    if (ret)
+        return ret;
+
+    return 0;
+}
+
 void a6xx_hfi_stop(struct a6xx_gmu *gmu)
 {
     int i;
@@ -385,7 +490,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
 
 void a6xx_hfi_init(struct a6xx_gmu *gmu)
 {
-    struct a6xx_gmu_bo *hfi = gmu->hfi;
+    struct a6xx_gmu_bo *hfi = &gmu->hfi;
     struct a6xx_hfi_queue_table_header *table = hfi->virt;
     struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
     u64 offset;
@@ -415,5 +520,5 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu)
     /* GMU response queue */
     offset += SZ_4K;
     a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
-        hfi->iova + offset, 4);
+        hfi->iova + offset, gmu->legacy ? 4 : 1);
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -51,7 +51,8 @@ struct a6xx_hfi_queue {
 /* HFI message types */
 
 #define HFI_MSG_CMD 0
-#define HFI_MSG_ACK 2
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
 
 #define HFI_F2H_MSG_ACK 126
 
@@ -94,7 +95,13 @@ struct perf_level {
     u32 freq;
 };
 
-struct a6xx_hfi_msg_perf_table {
+struct perf_gx_level {
+    u32 vote;
+    u32 acd;
+    u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
     u32 header;
     u32 num_gpu_levels;
     u32 num_gmu_levels;
@@ -103,6 +110,15 @@ struct a6xx_hfi_msg_perf_table {
     struct perf_level cx_votes[4];
 };
 
+struct a6xx_hfi_msg_perf_table {
+    u32 header;
+    u32 num_gpu_levels;
+    u32 num_gmu_levels;
+
+    struct perf_gx_level gx_votes[16];
+    struct perf_level cx_votes[4];
+};
+
 #define HFI_H2F_MSG_BW_TABLE 3
 
 struct a6xx_hfi_msg_bw_table {
@@ -124,4 +140,34 @@ struct a6xx_hfi_msg_test {
     u32 header;
 };
 
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+    u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+    u32 header;
+    u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+    u32 header;
+    u32 ack_type;
+    u32 freq;
+    u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+    u32 header;
+    u32 bw;
+    u32 freq;
+};
+
 #endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -92,6 +92,17 @@ static const struct adreno_info gpulist[] = {
         .gmem = SZ_1M,
         .inactive_period = DRM_MSM_INACTIVE_PERIOD,
         .init = a3xx_gpu_init,
+    }, {
+        .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+        .revn = 405,
+        .name = "A405",
+        .fw = {
+            [ADRENO_FW_PM4] = "a420_pm4.fw",
+            [ADRENO_FW_PFP] = "a420_pfp.fw",
+        },
+        .gmem = SZ_256K,
+        .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+        .init = a4xx_gpu_init,
     }, {
         .rev = ADRENO_REV(4, 2, 0, ANY_ID),
         .revn = 420,
@@ -189,6 +200,30 @@ static const struct adreno_info gpulist[] = {
         .inactive_period = DRM_MSM_INACTIVE_PERIOD,
         .init = a6xx_gpu_init,
         .zapfw = "a630_zap.mdt",
+    }, {
+        .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+        .revn = 640,
+        .name = "A640",
+        .fw = {
+            [ADRENO_FW_SQE] = "a630_sqe.fw",
+            [ADRENO_FW_GMU] = "a640_gmu.bin",
+        },
+        .gmem = SZ_1M,
+        .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+        .init = a6xx_gpu_init,
+        .zapfw = "a640_zap.mdt",
+    }, {
+        .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+        .revn = 650,
+        .name = "A650",
+        .fw = {
+            [ADRENO_FW_SQE] = "a650_sqe.fw",
+            [ADRENO_FW_GMU] = "a650_gmu.bin",
+        },
+        .gmem = SZ_1M + SZ_128K,
+        .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+        .init = a6xx_gpu_init,
+        .zapfw = "a650_zap.mdt",
     },
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
     return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 }
 
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+        struct platform_device *pdev)
+{
+    struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+    struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+    struct msm_gem_address_space *aspace;
+
+    aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+        0xfffffff);
+
+    if (IS_ERR(aspace) && !IS_ERR(mmu))
+        mmu->funcs->destroy(mmu);
+
+    return aspace;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 {
     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -197,7 +214,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
         *value = adreno_gpu->gmem;
         return 0;
     case MSM_PARAM_GMEM_BASE:
-        *value = 0x100000;
+        *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
         return 0;
     case MSM_PARAM_CHIP_ID:
         *value = adreno_gpu->rev.patchid |
@@ -459,7 +476,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                 break;
             /* fall-thru */
         case MSM_SUBMIT_CMD_BUF:
-            OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+            OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
                 CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
             OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
             OUT_RING(ring, submit->cmd[i].size);
@@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
     adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
 
-    adreno_gpu_config.va_start = SZ_16M;
-    adreno_gpu_config.va_end = 0xffffffff;
-    /* maximum range of a2xx mmu */
-    if (adreno_is_a2xx(adreno_gpu))
-        adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
-
     adreno_gpu_config.nr_rings = nr_rings;
 
     adreno_get_pwrlevels(&pdev->dev, gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -202,6 +202,11 @@ static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
     return (gpu->revn >= 400) && (gpu->revn < 500);
 }
 
+static inline int adreno_is_a405(struct adreno_gpu *gpu)
+{
+    return gpu->revn == 405;
+}
+
 static inline int adreno_is_a420(struct adreno_gpu *gpu)
 {
     return gpu->revn == 420;
@@ -237,6 +242,16 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
     return gpu->revn == 630;
 }
 
+static inline int adreno_is_a640(struct adreno_gpu *gpu)
+{
+    return gpu->revn == 640;
+}
+
+static inline int adreno_is_a650(struct adreno_gpu *gpu)
+{
+    return gpu->revn == 650;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
     const char *fwname);
@@ -272,6 +287,14 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
 int adreno_gpu_state_put(struct msm_gpu_state *state);
 
+/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+        struct platform_device *pdev);
+
 /*
  * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
  * out of secure mode
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -36,22 +36,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
     return to_dpu_kms(priv->kms);
 }
 
-static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
-{
-    struct drm_crtc *tmp_crtc;
-
-    drm_for_each_crtc(tmp_crtc, crtc->dev) {
-        if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
-                tmp_crtc->enabled) {
-            DPU_DEBUG("video interface connected crtc:%d\n",
-                tmp_crtc->base.id);
-            return true;
-        }
-    }
-
-    return false;
-}
-
 static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
         struct drm_crtc *crtc,
         struct drm_crtc_state *state,
@@ -94,7 +78,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
     u32 bw, threshold;
     u64 bw_sum_of_intfs = 0;
     enum dpu_crtc_client_type curr_client_type;
-    bool is_video_mode;
     struct dpu_crtc_state *dpu_cstate;
     struct drm_crtc *tmp_crtc;
     struct dpu_kms *kms;
@@ -144,11 +127,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
         bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
         DPU_DEBUG("calculated bandwidth=%uk\n", bw);
 
-        is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
-        threshold = (is_video_mode ||
-            _dpu_core_video_mode_intf_connected(crtc)) ?
-            kms->catalog->perf.max_bw_low :
-            kms->catalog->perf.max_bw_high;
+        threshold = kms->catalog->perf.max_bw_high;
 
         DPU_DEBUG("final threshold bw limit = %d\n", threshold);
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -9,6 +9,7 @@
 #include <linux/sort.h>
 #include <linux/debugfs.h>
 #include <linux/ktime.h>
+#include <linux/bits.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_flip_work.h>
@@ -20,6 +21,7 @@
 #include "dpu_kms.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
 #include "dpu_crtc.h"
 #include "dpu_plane.h"
 #include "dpu_encoder.h"
@@ -40,6 +42,9 @@
 /* timeout in ms waiting for frame done */
 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
 
+#define CONVERT_S3_15(val) \
+    (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
     struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -88,11 +93,9 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
 
 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
 {
-    struct dpu_crtc *dpu_crtc;
     struct dpu_crtc_state *crtc_state;
     int lm_idx, lm_horiz_position;
 
-    dpu_crtc = to_dpu_crtc(crtc);
     crtc_state = to_dpu_crtc_state(crtc->state);
 
     lm_horiz_position = 0;
@@ -422,6 +425,74 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
     drm_mode_debug_printmodeline(adj_mode);
 }
 
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+        struct dpu_hw_pcc_cfg *cfg)
+{
+    struct drm_color_ctm *ctm;
+
+    memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+    ctm = (struct drm_color_ctm *)state->ctm->data;
+
+    if (!ctm)
+        return;
+
+    cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+    cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+    cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+    cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+    cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+    cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+    cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+    cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+    cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+    struct drm_crtc_state *state = crtc->state;
+    struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+    struct dpu_crtc_mixer *mixer = cstate->mixers;
+    struct dpu_hw_pcc_cfg cfg;
+    struct dpu_hw_ctl *ctl;
+    struct dpu_hw_mixer *lm;
+    struct dpu_hw_dspp *dspp;
+    int i;
+
+
+    if (!state->color_mgmt_changed)
+        return;
+
+    for (i = 0; i < cstate->num_mixers; i++) {
+        ctl = mixer[i].lm_ctl;
+        lm = mixer[i].hw_lm;
+        dspp = mixer[i].hw_dspp;
+
+        if (!dspp || !dspp->ops.setup_pcc)
+            continue;
+
+        if (!state->ctm) {
+            dspp->ops.setup_pcc(dspp, NULL);
+        } else {
+            _dpu_crtc_get_pcc_coeff(state, &cfg);
+            dspp->ops.setup_pcc(dspp, &cfg);
+        }
+
+        mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
+            mixer[i].hw_dspp->idx);
+
+        /* stage config flush mask */
+        ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+        DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+            mixer[i].hw_lm->idx - DSPP_0,
+            ctl->idx - CTL_0,
+            mixer[i].flush_mask);
+    }
+}
+
 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
         struct drm_crtc_state *old_state)
 {
@@ -430,7 +501,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
     struct drm_encoder *encoder;
     struct drm_device *dev;
     unsigned long flags;
-    struct dpu_crtc_smmu_state_data *smmu_state;
 
     if (!crtc) {
         DPU_ERROR("invalid crtc\n");
@@ -448,7 +518,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
     dpu_crtc = to_dpu_crtc(crtc);
     cstate = to_dpu_crtc_state(crtc->state);
     dev = crtc->dev;
-    smmu_state = &dpu_crtc->smmu_state;
 
     _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
 
@@ -475,6 +544,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 
     _dpu_crtc_blend_setup(crtc);
 
+    _dpu_crtc_setup_cp_blocks(crtc);
+
     /*
      * PP_DONE irq is only used by command mode for now.
      * It is better to request pending before FLUSH and START trigger
@@ -491,7 +562,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
     struct drm_device *dev;
     struct drm_plane *plane;
     struct msm_drm_private *priv;
-    struct msm_drm_thread *event_thread;
     unsigned long flags;
     struct dpu_crtc_state *cstate;
 
@@ -513,8 +583,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
         return;
     }
 
-    event_thread = &priv->event_thread[crtc->index];
-
     if (dpu_crtc->event) {
         DPU_DEBUG("already received dpu_crtc->event\n");
     } else {
@@ -567,7 +635,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
         struct drm_crtc_state *state)
 {
-    struct dpu_crtc *dpu_crtc;
     struct dpu_crtc_state *cstate;
 
     if (!crtc || !state) {
@@ -575,7 +642,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
         return;
     }
 
-    dpu_crtc = to_dpu_crtc(crtc);
     cstate = to_dpu_crtc_state(state);
 
     DPU_DEBUG("crtc%d\n", crtc->base.id);
@@ -662,11 +728,9 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
 /**
  * dpu_crtc_duplicate_state - state duplicate hook
  * @crtc: Pointer to drm crtc structure
- * @Returns: Pointer to new drm_crtc_state structure
  */
 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-    struct dpu_crtc *dpu_crtc;
     struct dpu_crtc_state *cstate, *old_cstate;
 
     if (!crtc || !crtc->state) {
@@ -674,7 +738,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
         return NULL;
     }
 
-    dpu_crtc = to_dpu_crtc(crtc);
     old_cstate = to_dpu_crtc_state(crtc->state);
     cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
     if (!cstate) {
@@ -693,9 +756,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 {
     struct dpu_crtc *dpu_crtc;
     struct dpu_crtc_state *cstate;
-    struct drm_display_mode *mode;
     struct drm_encoder *encoder;
-    struct msm_drm_private *priv;
     unsigned long flags;
     bool release_bandwidth = false;
 
@@ -705,8 +766,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
     }
     dpu_crtc = to_dpu_crtc(crtc);
     cstate = to_dpu_crtc_state(crtc->state);
-    mode = &cstate->base.adjusted_mode;
-    priv = crtc->dev->dev_private;
 
     DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 
@@ -768,14 +827,12 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
 {
     struct dpu_crtc *dpu_crtc;
     struct drm_encoder *encoder;
-    struct msm_drm_private *priv;
     bool request_bandwidth;
 
     if (!crtc) {
         DPU_ERROR("invalid crtc\n");
         return;
     }
-    priv = crtc->dev->dev_private;
 
     pm_runtime_get_sync(crtc->dev->dev);
 
@@ -1319,6 +1376,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 
     drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
 
+    drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
     /* save user friendly CRTC name for later */
     snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -73,12 +73,14 @@ struct dpu_crtc_smmu_state_data {
  * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
  * @hw_lm: LM HW Driver context
  * @lm_ctl: CTL Path HW driver context
+ * @lm_dspp: DSPP HW driver context
  * @mixer_op_mode: mixer blending operation mode
  * @flush_mask: mixer flush mask for ctl, mixer and pipe
  */
 struct dpu_crtc_mixer {
     struct dpu_hw_mixer *hw_lm;
     struct dpu_hw_ctl *lm_ctl;
+    struct dpu_hw_dspp *hw_dspp;
     u32 mixer_op_mode;
     u32 flush_mask;
 };
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -20,6 +20,7 @@
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_intf.h"
 #include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
 #include "dpu_formats.h"
 #include "dpu_encoder_phys.h"
 #include "dpu_crtc.h"
@@ -536,6 +537,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
      * 1 LM, 1 INTF
      * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
      *
+     * Adding color blocks only to primary interface
      */
     if (intf_count == 2)
         topology.num_lm = 2;
@@ -544,6 +546,9 @@ static struct msm_display_topology dpu_encoder_get_topology(
     else
         topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
 
+    if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
+        topology.num_dspp = topology.num_lm;
+
     topology.num_enc = 0;
     topology.num_intf = intf_count;
 
@@ -959,7 +964,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
     struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
     struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
     struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
-    int num_lm, num_ctl, num_pp;
+    struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+    int num_lm, num_ctl, num_pp, num_dspp;
     int i, j;
 
     if (!drm_enc) {
@@ -1008,6 +1014,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
         drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
     num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
         drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+    num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+        drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+        ARRAY_SIZE(hw_dspp));
 
     for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
         dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1020,6 +1029,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
         cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
         cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+        cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
     }
 
     cstate->num_mixers = num_lm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -41,6 +41,8 @@
 #define PINGPONG_SDM845_SPLIT_MASK \
     (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
 
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
 #define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
 #define DEFAULT_DPU_LINE_WIDTH 2048
 #define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -291,29 +293,30 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
     },
 };
 
-#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
     { \
     .name = _name, .id = _id, \
    .base = _base, .len = 0x320, \
     .features = _fmask, \
     .sblk = _sblk, \
     .pingpong = _pp, \
-    .lm_pair_mask = (1 << _lmpair) \
+    .lm_pair_mask = (1 << _lmpair), \
+    .dspp = _dspp \
     }
 
 static const struct dpu_lm_cfg sdm845_lm[] = {
     LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_0, LM_1),
+        &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
     LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_1, LM_0),
+        &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
     LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_2, LM_5),
+        &sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
     LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_MAX, 0),
+        &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
     LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_MAX, 0),
+        &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
     LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
-        &sdm845_lm_sblk, PINGPONG_3, LM_2),
+        &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
 };
 
 /* SC7180 */
@@ -328,11 +331,30 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
 
 static const struct dpu_lm_cfg sc7180_lm[] = {
     LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
-        &sc7180_lm_sblk, PINGPONG_0, LM_1),
+        &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
     LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
-        &sc7180_lm_sblk, PINGPONG_1, LM_0),
+        &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
 };
 
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
+    .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+        .len = 0x90, .version = 0x10000},
+};
+
+#define DSPP_BLK(_name, _id, _base) \
+        {\
+        .name = _name, .id = _id, \
+        .base = _base, .len = 0x1800, \
+        .features = DSPP_SC7180_MASK, \
+        .sblk = &sc7180_dspp_sblk \
+        }
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+    DSPP_BLK("dspp_0", DSPP_0, 0x54000),
+};
+
 /*************************************************************
  * PINGPONG sub blocks config
  *************************************************************/
@@ -515,8 +537,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
 };
 
 static const struct dpu_perf_cfg sc7180_perf_data = {
-    .max_bw_low = 3900000,
-    .max_bw_high = 5500000,
+    .max_bw_low = 6800000,
+    .max_bw_high = 6800000,
     .min_core_ib = 2400000,
     .min_llcc_ib = 800000,
     .min_dram_ib = 800000,
@@ -587,6 +609,8 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
         .sspp = sc7180_sspp,
         .mixer_count = ARRAY_SIZE(sc7180_lm),
         .mixer = sc7180_lm,
+        .dspp_count = ARRAY_SIZE(sc7180_dspp),
+        .dspp = sc7180_dspp,
         .pingpong_count = ARRAY_SIZE(sc7180_pp),
         .pingpong = sc7180_pp,
         .intf_count = ARRAY_SIZE(sc7180_intf),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -145,6 +145,17 @@ enum {
     DPU_MIXER_MAX
 };
 
+/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+    DPU_DSPP_PCC = 0x1,
+    DPU_DSPP_GC,
+    DPU_DSPP_MAX
+};
+
 /**
  * PINGPONG sub-blocks
  * @DPU_PINGPONG_TE Tear check block
@@ -377,6 +388,16 @@ struct dpu_lm_sub_blks {
     struct dpu_pp_blk gc;
 };
 
+/**
+ * struct dpu_dspp_sub_blks: Information of DSPP block
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+    struct dpu_pp_blk gc;
+    struct dpu_pp_blk pcc;
+};
+
 struct dpu_pingpong_sub_blks {
     struct dpu_pp_blk te;
     struct dpu_pp_blk te2;
@@ -471,9 +492,23 @@ struct dpu_lm_cfg {
     DPU_HW_BLK_INFO;
     const struct dpu_lm_sub_blks *sblk;
     u32 pingpong;
+    u32 dspp;
     unsigned long lm_pair_mask;
 };
 
+/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ *                     supported by this block
+ * @sblk               sub-blocks information
+ */
+struct dpu_dspp_cfg {
+    DPU_HW_BLK_INFO;
+    const struct dpu_dspp_sub_blks *sblk;
+};
+
 /**
  * struct dpu_pingpong_cfg - information of PING-PONG blocks
  * @id enum identifying this block
@@ -688,6 +723,9 @@ struct dpu_mdss_cfg {
 
     u32 ad_count;
 
+    u32 dspp_count;
+    const struct dpu_dspp_cfg *dspp;
+
     /* Add additional block data structures here */
 
     struct dpu_perf_cfg perf;
@@ -716,6 +754,7 @@ struct dpu_mdss_hw_cfg_handler {
 #define BLK_PINGPONG(s) ((s)->pingpong)
 #define BLK_INTF(s) ((s)->intf)
 #define BLK_AD(s) ((s)->ad)
+#define BLK_DSPP(s) ((s)->dspp)
 
 /**
  * dpu_hw_catalog_init - dpu hardware catalog init API retrieves
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -272,6 +272,31 @@ static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
     return 0;
 }
 
+static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+    enum dpu_dspp dspp)
+{
+    uint32_t flushbits = 0;
+
+    switch (dspp) {
+    case DSPP_0:
+        flushbits = BIT(13);
+        break;
+    case DSPP_1:
+        flushbits = BIT(14);
+        break;
+    case DSPP_2:
+        flushbits = BIT(15);
+        break;
+    case DSPP_3:
+        flushbits = BIT(21);
+        break;
+    default:
+        return 0;
+    }
+
+    return flushbits;
+}
+
 static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
 {
     struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -548,6 +573,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
     ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
     ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
     ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+    ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
 };
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -139,6 +139,9 @@ struct dpu_hw_ctl_ops {
     uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
         enum dpu_lm blk);
 
+    uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
+        enum dpu_dspp blk);
+
     /**
      * Query the value of the intf flush mask
      * No effect on hardware
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+/* DSPP_PCC */
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+		struct dpu_hw_pcc_cfg *cfg)
+{
+	u32 base = ctx->cap->sblk->pcc.base;
+
+	if (!ctx || !base) {
+		DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+		return;
+	}
+
+	if (!cfg) {
+		DRM_DEBUG_DRIVER("disable pcc feature\n");
+		DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+		return;
+	}
+
+	DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+	DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+	DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+	DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+	DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+		unsigned long features)
+{
+	if (test_bit(DPU_DSPP_PCC, &features) &&
+		IS_SC7180_TARGET(c->hw.hwversion))
+		c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < m->dspp_count; i++) {
+		if (dspp == m->dspp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->dspp[i].base;
+			b->length = m->dspp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_DSPP;
+			return &m->dspp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops;
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+			void __iomem *addr,
+			const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_dspp *c;
+	const struct dpu_dspp_cfg *cfg;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _dspp_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_dspp_ops(c, c->cap->features);
+
+	dpu_hw_blk_init(&c->base, DPU_HW_BLK_DSPP, idx, &dpu_hw_ops);
+
+	return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+	if (dspp)
+		dpu_hw_blk_destroy(&dspp->base);
+
+	kfree(dspp);
+}
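
The new file above only exposes PCC through the setup_pcc op, and the op is only populated when DPU_DSPP_PCC is in the catalog features and the target is sc7180. A usage sketch (not part of the diff; the coefficient values are illustrative assumptions, and the dspp handle is assumed to come from dpu_hw_dspp_init):

	/* Hedged sketch: program a diagonal (identity-style) PCC matrix.
	 * Coefficient magnitudes are assumptions for illustration only;
	 * passing a NULL cfg disables PCC, per dpu_setup_dspp_pcc() above.
	 */
	struct dpu_hw_pcc_cfg cfg = {
		.r = { .r = 0x8000 },	/* red out driven by red in */
		.g = { .g = 0x8000 },	/* green out driven by green in */
		.b = { .b = 0x8000 },	/* blue out driven by blue in */
	};

	if (dspp->ops.setup_pcc)	/* op may be absent on other targets */
		dspp->ops.setup_pcc(dspp, &cfg);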
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ *                           component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+
+struct dpu_hw_pcc_coeff {
+	__u32 r;
+	__u32 g;
+	__u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc - pcc feature structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+	struct dpu_hw_pcc_coeff r;
+	struct dpu_hw_pcc_coeff g;
+	struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+	/**
+	 * setup_pcc - setup dspp pcc
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to layer_cfg
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* dspp */
+	int idx;
+	const struct dpu_dspp_cfg *cap;
+
+	/* Ops */
+	struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * dpu_hw_dspp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx:  DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+	void __iomem *addr, const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /*_DPU_HW_DSPP_H */
@@ -95,6 +95,7 @@ enum dpu_hw_blk_type {
 	DPU_HW_BLK_PINGPONG,
 	DPU_HW_BLK_INTF,
 	DPU_HW_BLK_WB,
+	DPU_HW_BLK_DSPP,
 	DPU_HW_BLK_MAX,
 };

@@ -425,5 +426,6 @@ struct dpu_mdss_color {
 #define DPU_DBG_MASK_TOP  (1 << 7)
 #define DPU_DBG_MASK_VBIF (1 << 8)
 #define DPU_DBG_MASK_ROT  (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)

 #endif  /* _DPU_HW_MDSS_H */
@@ -772,29 +772,21 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 {
 	struct iommu_domain *domain;
 	struct msm_gem_address_space *aspace;
-	int ret;
+	struct msm_mmu *mmu;

 	domain = iommu_domain_alloc(&platform_bus_type);
 	if (!domain)
 		return 0;

-	domain->geometry.aperture_start = 0x1000;
-	domain->geometry.aperture_end = 0xffffffff;
-
-	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
-			domain, "dpu1");
+	mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+	aspace = msm_gem_address_space_create(mmu, "dpu1",
+		0x1000, 0xfffffff);
+
 	if (IS_ERR(aspace)) {
-		iommu_domain_free(domain);
+		mmu->funcs->destroy(mmu);
 		return PTR_ERR(aspace);
 	}

-	ret = aspace->mmu->funcs->attach(aspace->mmu);
-	if (ret) {
-		DPU_ERROR("failed to attach iommu %d\n", ret);
-		msm_gem_address_space_put(aspace);
-		return ret;
-	}
-
 	dpu_kms->base.aspace = aspace;
 	return 0;
 }
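
The hunk above shows the construction order the address-space refactor establishes throughout this series: build the msm_mmu first, then hand it to msm_gem_address_space_create() with an explicit base and size, with no separate attach step (msm_iommu_new() attaches internally, per the msm_iommu.c hunk later in this diff). A condensed sketch of the pattern, not part of the diff; dev is assumed to be the appropriate struct device, and the base/size values are the dpu1 ones from this hunk:

	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
	struct msm_gem_address_space *aspace =
		msm_gem_address_space_create(mmu, "dpu1", 0x1000, 0xfffffff);

	if (IS_ERR(aspace)) {
		/* create() did not take ownership; the mdp4/mdp5 hunks
		 * below guard the destroy with !IS_ERR(mmu) like this. */
		if (!IS_ERR(mmu))
			mmu->funcs->destroy(mmu);
		return PTR_ERR(aspace);
	}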
@@ -158,6 +158,7 @@ struct dpu_global_state {
 	uint32_t mixer_to_enc_id[LM_MAX - LM_0];
 	uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
 	uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+	uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
 };

 struct dpu_global_state
@@ -9,6 +9,7 @@
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_pingpong.h"
 #include "dpu_hw_intf.h"
+#include "dpu_hw_dspp.h"
 #include "dpu_encoder.h"
 #include "dpu_trace.h"

@@ -174,6 +175,23 @@ int dpu_rm_init(struct dpu_rm *rm,
 		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
 	}

+	for (i = 0; i < cat->dspp_count; i++) {
+		struct dpu_hw_dspp *hw;
+		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
+			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
+			continue;
+		}
+		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
+		if (IS_ERR_OR_NULL(hw)) {
+			rc = PTR_ERR(hw);
+			DPU_ERROR("failed dspp object creation: err %d\n", rc);
+			goto fail;
+		}
+		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+	}
+
 	return 0;

 fail:

@@ -222,12 +240,17 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
  * if lm, and all other hardwired blocks connected to the lm (pp) is
  * available and appropriate
  * @pp_idx: output parameter, index of pingpong block attached to the layer
- * mixer in rm->pongpong_blks[].
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
  * @Return: true if lm matches all requirements, false otherwise
  */
 static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
 		struct dpu_global_state *global_state,
-		uint32_t enc_id, int lm_idx, int *pp_idx)
+		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+		struct dpu_rm_requirements *reqs)
 {
 	const struct dpu_lm_cfg *lm_cfg;
 	int idx;

@@ -251,6 +274,23 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
 		return false;
 	}
 	*pp_idx = idx;
+
+	if (!reqs->topology.num_dspp)
+		return true;
+
+	idx = lm_cfg->dspp - DSPP_0;
+	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+		return false;
+	}
+
+	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+				lm_cfg->dspp);
+		return false;
+	}
+	*dspp_idx = idx;
+
 	return true;
 }

@@ -262,6 +302,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
 {
 	int lm_idx[MAX_BLOCKS];
 	int pp_idx[MAX_BLOCKS];
+	int dspp_idx[MAX_BLOCKS] = {0};
 	int i, j, lm_count = 0;

 	if (!reqs->topology.num_lm) {

@@ -279,7 +320,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
 		lm_idx[lm_count] = i;

 		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
-				enc_id, i, &pp_idx[lm_count])) {
+				enc_id, i, &pp_idx[lm_count],
+				&dspp_idx[lm_count], reqs)) {
 			continue;
 		}

@@ -299,7 +341,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,

 			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
 					global_state, enc_id, j,
-					&pp_idx[lm_count])) {
+					&pp_idx[lm_count], &dspp_idx[lm_count],
+					reqs)) {
 				continue;
 			}

@@ -316,6 +359,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
 	for (i = 0; i < lm_count; i++) {
 		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
 		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+		global_state->dspp_to_enc_id[dspp_idx[i]] =
+			reqs->topology.num_dspp ? enc_id : 0;

 		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
 				pp_idx[i] + PINGPONG_0);

@@ -560,6 +605,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
 		hw_to_enc_id = global_state->intf_to_enc_id;
 		max_blks = ARRAY_SIZE(rm->intf_blks);
 		break;
+	case DPU_HW_BLK_DSPP:
+		hw_blks = rm->dspp_blks;
+		hw_to_enc_id = global_state->dspp_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->dspp_blks);
+		break;
 	default:
 		DPU_ERROR("blk type %d not managed by rm\n", type);
 		return 0;
@@ -19,6 +19,7 @@ struct dpu_global_state;
  * @mixer_blks: array of layer mixer hardware resources
  * @ctl_blks: array of ctl hardware resources
  * @intf_blks: array of intf hardware resources
+ * @dspp_blks: array of dspp hardware resources
  * @lm_max_width: cached layer mixer maximum width
  * @rm_lock: resource manager mutex
  */

@@ -27,6 +28,7 @@ struct dpu_rm {
 	struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
 	struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
 	struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
+	struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];

 	uint32_t lm_max_width;
 };
@@ -510,18 +510,20 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);

 	if (config->iommu) {
-		aspace = msm_gem_address_space_create(&pdev->dev,
-				config->iommu, "mdp4");
+		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+			config->iommu);
+
+		aspace = msm_gem_address_space_create(mmu,
+			"mdp4", 0x1000, 0xffffffff);

 		if (IS_ERR(aspace)) {
+			if (!IS_ERR(mmu))
+				mmu->funcs->destroy(mmu);
 			ret = PTR_ERR(aspace);
 			goto fail;
 		}

 		kms->aspace = aspace;
-
-		ret = aspace->mmu->funcs->attach(aspace->mmu);
-		if (ret)
-			goto fail;
 	} else {
 		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");

@@ -569,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
 	/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
 	config.max_clk = 266667000;
 	config.iommu = iommu_domain_alloc(&platform_bus_type);
-	if (config.iommu) {
-		config.iommu->geometry.aperture_start = 0x1000;
-		config.iommu->geometry.aperture_end = 0xffffffff;
-	}

 	return &config;
 }
@@ -342,6 +342,81 @@ static const struct mdp5_cfg_hw msm8x16_config = {
 	.max_clk = 320000000,
 };

+static const struct mdp5_cfg_hw msm8x36_config = {
+	.name = "msm8x36",
+	.mdp = {
+		.count = 1,
+		.base = { 0x0 },
+		.caps = MDP_CAP_SMP |
+			0,
+	},
+	.smp = {
+		.mmb_count = 8,
+		.mmb_size = 10240,
+		.clients = {
+			[SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+			[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+		},
+	},
+	.ctl = {
+		.count = 3,
+		.base = { 0x01000, 0x01200, 0x01400 },
+		.flush_hw_mask = 0x4003ffff,
+	},
+	.pipe_vig = {
+		.count = 1,
+		.base = { 0x04000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+			MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_rgb = {
+		.count = 2,
+		.base = { 0x14000, 0x16000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_dma = {
+		.count = 1,
+		.base = { 0x24000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+	},
+	.lm = {
+		.count = 2,
+		.base = { 0x44000, 0x47000 },
+		.instances = {
+			{ .id = 0, .pp = 0, .dspp = 0,
+				.caps = MDP_LM_CAP_DISPLAY, },
+			{ .id = 1, .pp = -1, .dspp = -1,
+				.caps = MDP_LM_CAP_WB, },
+		},
+		.nb_stages = 8,
+		.max_width = 2560,
+		.max_height = 0xFFFF,
+	},
+	.pp = {
+		.count = 1,
+		.base = { 0x70000 },
+	},
+	.ad = {
+		.count = 1,
+		.base = { 0x78000 },
+	},
+	.dspp = {
+		.count = 1,
+		.base = { 0x54000 },
+	},
+	.intf = {
+		.base = { 0x00000, 0x6a800, 0x6b000 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+		},
+	},
+	.max_clk = 366670000,
+};
+
 static const struct mdp5_cfg_hw msm8x94_config = {
 	.name = "msm8x94",
 	.mdp = {

@@ -840,6 +915,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
 	{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
 	{ .revision = 3, .config = { .hw = &apq8084_config } },
 	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+	{ .revision = 8, .config = { .hw = &msm8x36_config } },
 	{ .revision = 9, .config = { .hw = &msm8x94_config } },
 	{ .revision = 7, .config = { .hw = &msm8x96_config } },
 	{ .revision = 11, .config = { .hw = &msm8x76_config } },

@@ -941,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
 	static struct mdp5_cfg_platform config = {};

 	config.iommu = iommu_domain_alloc(&platform_bus_type);
-	if (config.iommu) {
-		config.iommu->geometry.aperture_start = 0x1000;
-		config.iommu->geometry.aperture_end = 0xffffffff;
-	}

 	return &config;
 }
@@ -959,7 +959,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!ctl)
 		return -EINVAL;

-	/* don't support LM cursors when we we have source split enabled */
+	/* don't support LM cursors when we have source split enabled */
 	if (mdp5_cstate->pipeline.r_mixer)
 		return -EINVAL;

@@ -1030,7 +1030,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 		return -EINVAL;
 	}

-	/* don't support LM cursors when we we have source split enabled */
+	/* don't support LM cursors when we have source split enabled */
 	if (mdp5_cstate->pipeline.r_mixer)
 		return -EINVAL;
@@ -624,25 +624,25 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	mdelay(16);

 	if (config->platform.iommu) {
+		struct msm_mmu *mmu;
+
 		iommu_dev = &pdev->dev;
 		if (!dev_iommu_fwspec_get(iommu_dev))
 			iommu_dev = iommu_dev->parent;

-		aspace = msm_gem_address_space_create(iommu_dev,
-				config->platform.iommu, "mdp5");
+		mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
+
+		aspace = msm_gem_address_space_create(mmu, "mdp5",
+			0x1000, 0xffffffff);
+
 		if (IS_ERR(aspace)) {
+			if (!IS_ERR(mmu))
+				mmu->funcs->destroy(mmu);
 			ret = PTR_ERR(aspace);
 			goto fail;
 		}

 		kms->aspace = aspace;
-
-		ret = aspace->mmu->funcs->attach(aspace->mmu);
-		if (ret) {
-			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
-				ret);
-			goto fail;
-		}
 	} else {
 		DRM_DEV_INFO(&pdev->dev,
 			"no iommu, fallback to phys contig buffers for scanout\n");

@@ -935,7 +935,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)

 	return 0;
 fail:
-	mdp5_destroy(pdev);
+	if (mdp5_kms)
+		mdp5_destroy(pdev);
 	return ret;
 }
@@ -37,9 +37,10 @@
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
+ * - 1.6.0 - Syncobj support
 */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	5
+#define MSM_VERSION_MINOR	6
 #define MSM_VERSION_PATCHLEVEL	0

 static const struct drm_mode_config_funcs mode_config_funcs = {

@@ -1002,7 +1003,8 @@ static struct drm_driver msm_driver = {
 	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
-				DRIVER_MODESET,
+				DRIVER_MODESET |
+				DRIVER_SYNCOBJ,
 	.open               = msm_open,
 	.postclose          = msm_postclose,
 	.lastclose          = drm_fb_helper_lastclose,
@@ -105,6 +105,7 @@ struct msm_display_topology {
 	u32 num_lm;
 	u32 num_enc;
 	u32 num_intf;
+	u32 num_dspp;
 };

 /**

@@ -236,7 +237,8 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc);
 void msm_crtc_disable_vblank(struct drm_crtc *crtc);

 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, int npages);
+		struct msm_gem_vma *vma, int npages,
+		u64 range_start, u64 range_end);
 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,

@@ -250,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

 struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name);
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
-		const char *name, uint64_t va_start, uint64_t va_end);
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+		u64 va_start, u64 size);

 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);

@@ -276,6 +274,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end);
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
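
msm_gem_get_and_pin_iova_range() is the new range-restricted variant of the pin helper; msm_gem_get_and_pin_iova() becomes a thin wrapper passing (0, U64_MAX), as the msm_gem.c hunk below shows, and per the comment added there the bounds are expressed in pages. A caller sketch (not part of the diff; the window values are illustrative assumptions):

	uint64_t iova;
	int ret;

	/* Confine the buffer to a sub-window of the address space.
	 * Byte addresses are shifted because the allocator works in pages.
	 * The 16MB..2GB window here is an assumption for illustration.
	 */
	ret = msm_gem_get_and_pin_iova_range(obj, aspace, &iova,
			SZ_16M >> PAGE_SHIFT, SZ_2G >> PAGE_SHIFT);
	if (ret)
		return ret;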
@@ -389,7 +389,8 @@ put_iova(struct drm_gem_object *obj)
 }

 static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;

@@ -404,7 +405,8 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);

-	ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+	ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+		range_start, range_end);
 	if (ret) {
 		del_vma(vma);
 		return ret;

@@ -426,6 +428,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 		prot |= IOMMU_WRITE;

+	if (msm_obj->flags & MSM_BO_MAP_PRIV)
+		prot |= IOMMU_PRIV;
+
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));

 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))

@@ -443,9 +448,13 @@
 			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }

-/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova,
+		u64 range_start, u64 range_end)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	u64 local;

@@ -453,7 +462,8 @@

 	mutex_lock(&msm_obj->lock);

-	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+	ret = msm_gem_get_iova_locked(obj, aspace, &local,
+		range_start, range_end);

 	if (!ret)
 		ret = msm_gem_pin_iova(obj, aspace);

@@ -465,6 +475,13 @@
 	return ret;
 }

+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
 /*
  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
  * valid for the life of the object

@@ -476,7 +493,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 	int ret;

 	mutex_lock(&msm_obj->lock);
-	ret = msm_gem_get_iova_locked(obj, aspace, iova);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
 	mutex_unlock(&msm_obj->lock);

 	return ret;
@@ -13,6 +13,7 @@

 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN   0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */

 struct msm_gem_address_space {
 	const char *name;
@@ -8,7 +8,9 @@
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>

+#include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>

 #include "msm_drv.h"
 #include "msm_gpu.h"

@@ -391,6 +393,186 @@ static void submit_cleanup(struct msm_gem_submit *submit)
 	}
 }

+
+struct msm_submit_post_dep {
+	struct drm_syncobj *syncobj;
+	uint64_t point;
+	struct dma_fence_chain *chain;
+};
+
+static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
+                                          struct drm_file *file,
+                                          uint64_t in_syncobjs_addr,
+                                          uint32_t nr_in_syncobjs,
+                                          size_t syncobj_stride,
+                                          struct msm_ringbuffer *ring)
+{
+	struct drm_syncobj **syncobjs = NULL;
+	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+	int ret = 0;
+	uint32_t i, j;
+
+	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!syncobjs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nr_in_syncobjs; ++i) {
+		uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+		struct dma_fence *fence;
+
+		if (copy_from_user(&syncobj_desc,
+		                   u64_to_user_ptr(address),
+		                   min(syncobj_stride, sizeof(syncobj_desc)))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (syncobj_desc.point &&
+		    !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
+		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
+		                             syncobj_desc.point, 0, &fence);
+		if (ret)
+			break;
+
+		if (!dma_fence_match_context(fence, ring->fctx->context))
+			ret = dma_fence_wait(fence, true);
+
+		dma_fence_put(fence);
+		if (ret)
+			break;
+
+		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
+			syncobjs[i] =
+				drm_syncobj_find(file, syncobj_desc.handle);
+			if (!syncobjs[i]) {
+				ret = -EINVAL;
+				break;
+			}
+		}
+	}
+
+	if (ret) {
+		for (j = 0; j <= i; ++j) {
+			if (syncobjs[j])
+				drm_syncobj_put(syncobjs[j]);
+		}
+		kfree(syncobjs);
+		return ERR_PTR(ret);
+	}
+	return syncobjs;
+}
+
+static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
+                               uint32_t nr_syncobjs)
+{
+	uint32_t i;
+
+	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+		if (syncobjs[i])
+			drm_syncobj_replace_fence(syncobjs[i], NULL);
+	}
+}
+
+static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+                                                       struct drm_file *file,
+                                                       uint64_t syncobjs_addr,
+                                                       uint32_t nr_syncobjs,
+                                                       size_t syncobj_stride)
+{
+	struct msm_submit_post_dep *post_deps;
+	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+	int ret = 0;
+	uint32_t i, j;
+
+	post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+	                          GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+	if (!post_deps)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nr_syncobjs; ++i) {
+		uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+		if (copy_from_user(&syncobj_desc,
+		                   u64_to_user_ptr(address),
+		                   min(syncobj_stride, sizeof(syncobj_desc)))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		post_deps[i].point = syncobj_desc.point;
+		post_deps[i].chain = NULL;
+
+		if (syncobj_desc.flags) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (syncobj_desc.point) {
+			if (!drm_core_check_feature(dev,
+			                            DRIVER_SYNCOBJ_TIMELINE)) {
+				ret = -EOPNOTSUPP;
+				break;
+			}
+
+			post_deps[i].chain =
+				kmalloc(sizeof(*post_deps[i].chain),
+				        GFP_KERNEL);
+			if (!post_deps[i].chain) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		post_deps[i].syncobj =
+			drm_syncobj_find(file, syncobj_desc.handle);
+		if (!post_deps[i].syncobj) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		for (j = 0; j <= i; ++j) {
+			kfree(post_deps[j].chain);
+			if (post_deps[j].syncobj)
+				drm_syncobj_put(post_deps[j].syncobj);
+		}
+
+		kfree(post_deps);
+		return ERR_PTR(ret);
+	}
+
+	return post_deps;
+}
+
+static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
+                                  uint32_t count, struct dma_fence *fence)
+{
+	uint32_t i;
+
+	for (i = 0; post_deps && i < count; ++i) {
+		if (post_deps[i].chain) {
+			drm_syncobj_add_point(post_deps[i].syncobj,
+			                      post_deps[i].chain,
+			                      fence, post_deps[i].point);
+			post_deps[i].chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(post_deps[i].syncobj,
+			                          fence);
+		}
+	}
+}
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {

@@ -403,6 +585,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct sync_file *sync_file = NULL;
 	struct msm_gpu_submitqueue *queue;
 	struct msm_ringbuffer *ring;
+	struct msm_submit_post_dep *post_deps = NULL;
+	struct drm_syncobj **syncobjs_to_reset = NULL;
 	int out_fence_fd = -1;
 	struct pid *pid = get_pid(task_pid(current));
 	bool has_ww_ticket = false;

@@ -411,6 +595,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (!gpu)
 		return -ENXIO;

+	if (args->pad)
+		return -EINVAL;
+
 	/* for now, we just have 3d pipe.. eventually this would need to
 	 * be more clever to dispatch to appropriate gpu module:
 	 */

@@ -458,9 +645,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		return ret;
 	}

+	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
+		syncobjs_to_reset = msm_wait_deps(dev, file,
+		                                  args->in_syncobjs,
+		                                  args->nr_in_syncobjs,
+		                                  args->syncobj_stride, ring);
+		if (IS_ERR(syncobjs_to_reset))
+			return PTR_ERR(syncobjs_to_reset);
+	}
+
+	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
+		post_deps = msm_parse_post_deps(dev, file,
+		                                args->out_syncobjs,
+		                                args->nr_out_syncobjs,
+		                                args->syncobj_stride);
+		if (IS_ERR(post_deps)) {
+			ret = PTR_ERR(post_deps);
+			goto out_post_unlock;
+		}
+	}
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
-		return ret;
+		goto out_post_unlock;

 	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);

@@ -587,6 +794,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		args->fence_fd = out_fence_fd;
 	}

+	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
+	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
+	                      submit->fence);
+
+
 out:
 	submit_cleanup(submit);
 	if (has_ww_ticket)

@@ -597,5 +809,23 @@ out_unlock:
 	if (ret && (out_fence_fd >= 0))
 		put_unused_fd(out_fence_fd);
 	mutex_unlock(&dev->struct_mutex);
+
+out_post_unlock:
+	if (!IS_ERR_OR_NULL(post_deps)) {
+		for (i = 0; i < args->nr_out_syncobjs; ++i) {
+			kfree(post_deps[i].chain);
+			drm_syncobj_put(post_deps[i].syncobj);
+		}
+		kfree(post_deps);
+	}
+
+	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+		for (i = 0; i < args->nr_in_syncobjs; ++i) {
+			if (syncobjs_to_reset[i])
+				drm_syncobj_put(syncobjs_to_reset[i]);
+		}
+		kfree(syncobjs_to_reset);
+	}
+
 	return ret;
 }
@@ -103,7 +103,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,

 /* Initialize a new vma and allocate an iova for it */
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, int npages)
+		struct msm_gem_vma *vma, int npages,
+		u64 range_start, u64 range_end)
 {
 	int ret;

@@ -111,7 +112,8 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		return -EBUSY;

 	spin_lock(&aspace->lock);
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
+		0, range_start, range_end, 0);
 	spin_unlock(&aspace->lock);

 	if (ret)

@@ -125,14 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 	return 0;
 }

-
 struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name)
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+		u64 va_start, u64 size)
 {
 	struct msm_gem_address_space *aspace;
-	u64 size = domain->geometry.aperture_end -
-		domain->geometry.aperture_start;
+
+	if (IS_ERR(mmu))
+		return ERR_CAST(mmu);

 	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
 	if (!aspace)

@@ -140,33 +142,9 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,

 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
-	aspace->mmu = msm_iommu_new(dev, domain);
-
-	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
-		size >> PAGE_SHIFT);
-
-	kref_init(&aspace->kref);
-
-	return aspace;
-}
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
-		const char *name, uint64_t va_start, uint64_t va_end)
-{
-	struct msm_gem_address_space *aspace;
-	u64 size = va_end - va_start;
-
-	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
-	if (!aspace)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&aspace->lock);
-	aspace->name = name;
-	aspace->mmu = msm_gpummu_new(dev, gpu);
-
-	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
-		size >> PAGE_SHIFT);
+	aspace->mmu = mmu;
+
+	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

 	kref_init(&aspace->kref);
@@ -821,51 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 	return 0;
 }

-static struct msm_gem_address_space *
-msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
-		uint64_t va_start, uint64_t va_end)
-{
-	struct msm_gem_address_space *aspace;
-	int ret;
-
-	/*
-	 * Setup IOMMU.. eventually we will (I think) do this once per context
-	 * and have separate page tables per context.  For now, to keep things
-	 * simple and to get something working, just use a single address space:
-	 */
-	if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
-		struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
-		if (!iommu)
-			return NULL;
-
-		iommu->geometry.aperture_start = va_start;
-		iommu->geometry.aperture_end = va_end;
-
-		DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
-
-		aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
-		if (IS_ERR(aspace))
-			iommu_domain_free(iommu);
-	} else {
-		aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
-			va_start, va_end);
-	}
-
-	if (IS_ERR(aspace)) {
-		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
-			PTR_ERR(aspace));
-		return ERR_CAST(aspace);
-	}
-
-	ret = aspace->mmu->funcs->attach(aspace->mmu);
-	if (ret) {
-		msm_gem_address_space_put(aspace);
-		return ERR_PTR(ret);
-	}
-
-	return aspace;
-}
-
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, struct msm_gpu_config *config)

@@ -938,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,

 	msm_devfreq_init(gpu);

-	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
-		config->va_start, config->va_end);
+	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

 	if (gpu->aspace == NULL)
 		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
@@ -21,8 +21,6 @@ struct msm_gpu_state;

 struct msm_gpu_config {
 	const char *ioname;
-	uint64_t va_start;
-	uint64_t va_end;
 	unsigned int nr_rings;
 };

@@ -64,6 +62,8 @@ struct msm_gpu_funcs {
 	int (*gpu_state_put)(struct msm_gpu_state *state);
 	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
 	void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+	struct msm_gem_address_space *(*create_address_space)
+		(struct msm_gpu *gpu, struct platform_device *pdev);
 };

 struct msm_gpu {
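
With create_address_space in msm_gpu_funcs and the generic helper removed from msm_gpu.c above, each GPU backend now owns its address-space setup. A plausible IOMMU-backed implementation under the new helpers is sketched below; it is modeled on the code this series removes, not copied from any one backend, and the name and VA range are assumptions:

	static struct msm_gem_address_space *
	example_create_address_space(struct msm_gpu *gpu,
			struct platform_device *pdev)
	{
		struct iommu_domain *domain;
		struct msm_mmu *mmu;

		domain = iommu_domain_alloc(&platform_bus_type);
		if (!domain)
			return NULL; /* msm_gpu_init() falls back to VRAM carveout */

		mmu = msm_iommu_new(&pdev->dev, domain);

		/* Illustrative range: 16MB base, rest of the 32-bit space */
		return msm_gem_address_space_create(mmu, "gpu",
				SZ_16M, 0xffffffffULL - SZ_16M);
	}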
@@ -21,17 +21,12 @@ struct msm_gpummu {
 #define GPUMMU_PAGE_SIZE SZ_4K
 #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

-static int msm_gpummu_attach(struct msm_mmu *mmu)
-{
-	return 0;
-}
-
 static void msm_gpummu_detach(struct msm_mmu *mmu)
 {
 }

 static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned len, int prot)
+		struct sg_table *sgt, size_t len, int prot)
 {
 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;

@@ -59,7 +54,7 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
 	return 0;
 }

-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 {
 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;

@@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
 }

 static const struct msm_mmu_funcs funcs = {
-		.attach = msm_gpummu_attach,
		.detach = msm_gpummu_detach,
		.map = msm_gpummu_map,
		.unmap = msm_gpummu_unmap,
@@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 	return 0;
 }

-static int msm_iommu_attach(struct msm_mmu *mmu)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-
-	return iommu_attach_device(iommu->domain, mmu->dev);
-}
-
 static void msm_iommu_detach(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);

@@ -38,7 +31,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
 }

 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
-		struct sg_table *sgt, unsigned len, int prot)
+		struct sg_table *sgt, size_t len, int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;

@@ -49,7 +42,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	return (ret == len) ? 0 : -EINVAL;
 }

-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);

@@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 }

 static const struct msm_mmu_funcs funcs = {
-		.attach = msm_iommu_attach,
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,

@@ -76,6 +68,10 @@ static const struct msm_mmu_funcs funcs = {
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
 	struct msm_iommu *iommu;
+	int ret;
+
+	if (!domain)
+		return ERR_PTR(-ENODEV);

 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)

@@ -85,5 +81,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 	msm_mmu_init(&iommu->base, dev, &funcs);
 	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

+	ret = iommu_attach_device(iommu->domain, dev);
+	if (ret) {
+		kfree(iommu);
+		return ERR_PTR(ret);
+	}
+
 	return &iommu->base;
 }
@@ -10,11 +10,10 @@
 #include <linux/iommu.h>

 struct msm_mmu_funcs {
-	int (*attach)(struct msm_mmu *mmu);
 	void (*detach)(struct msm_mmu *mmu);
 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-			unsigned len, int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
+			size_t len, int prot);
+	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
 	void (*destroy)(struct msm_mmu *mmu);
 };
@@ -29,8 +29,6 @@
 * or shader programs (if not emitted inline in cmdstream).
 */

-#ifdef CONFIG_DEBUG_FS
-
 #include <linux/circ_buf.h>
 #include <linux/debugfs.h>
 #include <linux/kfifo.h>

@@ -47,6 +45,8 @@ bool rd_full = false;
 MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
 module_param_named(rd_full, rd_full, bool, 0600);

+#ifdef CONFIG_DEBUG_FS
+
 enum rd_sect_type {
 	RD_NONE,
 	RD_TEST,    /* ascii text */
@@ -217,13 +217,28 @@ struct drm_msm_gem_submit_bo {
 #define MSM_SUBMIT_FENCE_FD_IN   0x40000000 /* enable input fence_fd */
 #define MSM_SUBMIT_FENCE_FD_OUT  0x20000000 /* enable output fence_fd */
 #define MSM_SUBMIT_SUDO          0x10000000 /* run submitted cmds from RB */
+#define MSM_SUBMIT_SYNCOBJ_IN    0x08000000 /* enable input syncobj */
+#define MSM_SUBMIT_SYNCOBJ_OUT   0x04000000 /* enable output syncobj */
 #define MSM_SUBMIT_FLAGS                ( \
		MSM_SUBMIT_NO_IMPLICIT   | \
		MSM_SUBMIT_FENCE_FD_IN   | \
		MSM_SUBMIT_FENCE_FD_OUT  | \
		MSM_SUBMIT_SUDO          | \
+		MSM_SUBMIT_SYNCOBJ_IN    | \
+		MSM_SUBMIT_SYNCOBJ_OUT   | \
		0)

+#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SUBMIT_SYNCOBJ_FLAGS        ( \
+		MSM_SUBMIT_SYNCOBJ_RESET | \
+		0)
+
+struct drm_msm_gem_submit_syncobj {
+	__u32 handle; /* in, syncobj handle. */
+	__u32 flags;  /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
+	__u64 point;  /* in, timepoint for timeline syncobjs. */
+};
+
 /* Each cmdstream submit consists of a table of buffers involved, and
 * one or more cmdstream buffers.  This allows for conditional execution
 * (context-restore), and IB buffers needed for per tile/bin draw cmds.

@@ -236,7 +251,14 @@ struct drm_msm_gem_submit {
 	__u64 bos;            /* in, ptr to array of submit_bo's */
 	__u64 cmds;           /* in, ptr to array of submit_cmd's */
 	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 	__u32 queueid;        /* in, submitqueue id */
+	__u64 in_syncobjs;    /* in, ptr to array of drm_msm_gem_submit_syncobj */
+	__u64 out_syncobjs;   /* in, ptr to array of drm_msm_gem_submit_syncobj */
+	__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
+	__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
+	__u32 syncobj_stride; /* in, stride of syncobj arrays. */
+	__u32 pad;            /* in, reserved for future use, always 0. */
+
 };

 /* The normal way to synchronize with the GPU is just to CPU_PREP on
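
The UABI additions above are everything userspace needs for the new syncobj path. A hedged sketch of a submit that waits on one binary syncobj and signals another follows; the handles, queue id, and fd are placeholders, the bos/cmds arrays are omitted for brevity, and the 1.6.0 driver version bumped earlier in this series is assumed:

	struct drm_msm_gem_submit_syncobj in = {
		.handle = in_syncobj,              /* placeholder handle */
		.flags = MSM_SUBMIT_SYNCOBJ_RESET, /* reset after the wait */
	};
	struct drm_msm_gem_submit_syncobj out = {
		.handle = out_syncobj,             /* placeholder handle */
		.point = 0,                        /* 0 = binary, not timeline */
	};
	struct drm_msm_gem_submit req = {
		.flags = MSM_PIPE_3D0 | MSM_SUBMIT_SYNCOBJ_IN |
			 MSM_SUBMIT_SYNCOBJ_OUT,
		.queueid = queueid,                /* placeholder */
		.in_syncobjs = (__u64)(uintptr_t)&in,
		.nr_in_syncobjs = 1,
		.out_syncobjs = (__u64)(uintptr_t)&out,
		.nr_out_syncobjs = 1,
		.syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
		/* .bos / .cmds omitted */
	};

	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));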