drm fixes for 5.13-rc3
dma-buf:
- WARN fix

amdgpu:
- Fix downscaling ratio on DCN3.x
- Fix for non-4K pages
- PCO/RV compute hang fix
- Dongle fix
- Aldebaran codec query support
- Refcount leak fix
- Use after free fix
- Navi12 golden settings updates
- GPU reset fixes

radeon:
- Fix for imported BO handling

i915:
- Pin the L-shape quirked object as unshrinkable to fix crashes
- Disable HiZ Raw Stall Optimization on broken gen7 to fix glitches, gfx corruption
- GVT: Move mdev attribute groups into kvmgt module to fix kconfig deps issue

exynos:
- Correct kerneldoc of fimd_shadow_protect_win function.
- Drop redundant error messages.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmCnNz0ACgkQDHTzWXnE
hr5oCw//Ux2+zc7aBvEdotgJo6GW17ymvrIpJSJ2sdUAq3xxvYgMOpSaWJPXW1RF
wLtbdxbXOGEbGI59uBMPNrCJ4ukJbAiL7J4LIa/531y1DA0Kwfz0bzz3G2uqRzCG
hbxLrLG8pUZ4GbzStjrfRCRDlNywShGtuDxznnouF30FitBgQUsjeLxM7/oKfZZj
kq3kJkEBbDw+nQ5RVAjm1KdUqa8AGnAKLH+7GkzPiy/ZuwZpEqAlZfVFLnxANaz4
Vij/IYUDPBBeeQTtPwHJUSZgwFiJTex/vDXopSu0zt7GKmh7WguKhx0E5ccQAZdb
6k8BobLno4yTDwQXWFTvos4lHm2MKPNYa1cops7cgGcB+sLtMfG7BSJ7GsxSSUYh
2q+POHDym75M2SvFq4B+r0RPYNWj4W3FCvVFX4bhuwtRv2OfTw+enYnTrcBVodPF
1K6447IDSvMrYUo2e6rRn/OTldaOsIQEIk5k5xj1a+AOVMIUt2SUTOwIhwGCEeaY
HVfwbkkqcfjPFX6wqqO7+TTlK5x+aj8G2h4ZJFIiftnsfhdw5DLsiOthjypxlb/l
zQjl0mGi7QaIYnhS1FTby5YDW7mmFKKoPaA7gFIzgrALG7zDoomxzbRbrFEwGanS
ufLA5JXY2/1iff6O4IxmeWLuBwXbcColgLU5L/0bCI6DtsaGl9M=
=EJFf
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2021-05-21-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Usual collection, mostly amdgpu and some i915 regression fixes. I
  nearly managed to hose my build/sign machine this week, but I
  recovered it just in time, and I even got clang12 built.

  dma-buf:
   - WARN fix

  amdgpu:
   - Fix downscaling ratio on DCN3.x
   - Fix for non-4K pages
   - PCO/RV compute hang fix
   - Dongle fix
   - Aldebaran codec query support
   - Refcount leak fix
   - Use after free fix
   - Navi12 golden settings updates
   - GPU reset fixes

  radeon:
   - Fix for imported BO handling

  i915:
   - Pin the L-shape quirked object as unshrinkable to fix crashes
   - Disable HiZ Raw Stall Optimization on broken gen7 to fix glitches,
     gfx corruption
   - GVT: Move mdev attribute groups into kvmgt module to fix kconfig
     deps issue

  exynos:
   - Correct kerneldoc of fimd_shadow_protect_win function
   - Drop redundant error messages"

* tag 'drm-fixes-2021-05-21-1' of git://anongit.freedesktop.org/drm/drm:
  dma-buf: fix unintended pin/unpin warnings
  drm/amdgpu: stop touching sched.ready in the backend
  drm/amd/amdgpu: fix a potential deadlock in gpu reset
  drm/amdgpu: update sdma golden setting for Navi12
  drm/amdgpu: update gc golden setting for Navi12
  drm/amdgpu: Fix a use-after-free
  drm/amdgpu: add video_codecs query support for aldebaran
  drm/amd/amdgpu: fix refcount leak
  drm/amd/display: Disconnect non-DP with no EDID
  drm/amdgpu: disable 3DCGCG on picasso/raven1 to avoid compute hang
  drm/amdgpu: Fix GPU TLB update error when PAGE_SIZE > AMDGPU_PAGE_SIZE
  drm/radeon: use the dummy page for GART if needed
  drm/amd/display: Use the correct max downscaling value for DCN3.x family
  drm/i915/gt: Disable HiZ Raw Stall Optimization on broken gen7
  drm/i915/gem: Pin the L-shape quirked object as unshrinkable
  drm/exynos/decon5433: Remove redundant error printing in exynos5433_decon_probe()
  drm/exynos: Remove redundant error printing in exynos_dsi_probe()
  drm/exynos: correct exynos_drm_fimd kerneldoc
  drm/i915/gvt: Move mdev attribute groups into kvmgt module
commit 79a106fc65
@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 
     if (dma_buf_is_dynamic(attach->dmabuf)) {
         dma_resv_lock(attach->dmabuf->resv, NULL);
-        ret = dma_buf_pin(attach);
+        ret = dmabuf->ops->pin(attach);
         if (ret)
             goto err_unlock;
     }
@@ -786,7 +786,7 @@ err_attach:
 
 err_unpin:
     if (dma_buf_is_dynamic(attach->dmabuf))
-        dma_buf_unpin(attach);
+        dmabuf->ops->unpin(attach);
 
 err_unlock:
     if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
     __unmap_dma_buf(attach, attach->sgt, attach->dir);
 
     if (dma_buf_is_dynamic(attach->dmabuf)) {
-        dma_buf_unpin(attach);
+        dmabuf->ops->unpin(attach);
         dma_resv_unlock(attach->dmabuf->resv);
     }
 }
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
     if (dma_buf_is_dynamic(attach->dmabuf)) {
         dma_resv_assert_held(attach->dmabuf->resv);
         if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
-            r = dma_buf_pin(attach);
+            r = attach->dmabuf->ops->pin(attach);
             if (r)
                 return ERR_PTR(r);
         }
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
     if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
          !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
-        dma_buf_unpin(attach);
+        attach->dmabuf->ops->unpin(attach);
 
     if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
         attach->sgt = sg_table;
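A note on the dma-buf hunks above: dma_buf_pin() and dma_buf_unpin() assert the dynamic-importer contract, so calling them on the attach/detach/map paths for a dynamic exporter paired with a non-dynamic importer tripped the WARN even though the pinning itself was needed. Calling the exporter's op directly does the same pinning without that assertion. A simplified sketch of the 5.13-era helper (error paths and kerneldoc trimmed):

int dma_buf_pin(struct dma_buf_attachment *attach)
{
    struct dma_buf *dmabuf = attach->dmabuf;
    int ret = 0;

    /* pin()/unpin() belong to the dynamic-importer contract;
     * this WARN is what the hunks above avoid */
    WARN_ON(!dma_buf_attachment_is_dynamic(attach));

    dma_resv_assert_held(dmabuf->resv);

    if (dmabuf->ops->pin)
        ret = dmabuf->ops->pin(attach);

    return ret;
}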
@@ -4479,7 +4479,6 @@ out:
             r = amdgpu_ib_ring_tests(tmp_adev);
             if (r) {
                 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
-                r = amdgpu_device_ip_suspend(tmp_adev);
                 need_full_reset = true;
                 r = -EAGAIN;
                 goto end;
@@ -288,10 +288,13 @@ out:
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
     struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+    int i;
 
     drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
     if (rfb->base.obj[0]) {
+        for (i = 0; i < rfb->base.format->num_planes; i++)
+            drm_gem_object_put(rfb->base.obj[0]);
         amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
         rfb->base.obj[0] = NULL;
         drm_framebuffer_unregister_private(&rfb->base);
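For the refcount leak fix above: the fbdev framebuffer's planes all point at the same GEM object, and framebuffer creation takes one reference per plane, so destroy has to drop one per plane as well before the final put of the pinned object. A toy model of the balance with illustrative names (not the amdgpu API):

#include <assert.h>

struct gem_obj { int refcount; };

static void get(struct gem_obj *o) { o->refcount++; }
static void put(struct gem_obj *o) { o->refcount--; }

int main(void)
{
    struct gem_obj bo = { .refcount = 1 };
    const int num_planes = 2;   /* e.g. an NV12 framebuffer */
    int i;

    /* creation: one reference per plane, all on the same object */
    for (i = 0; i < num_planes; i++)
        get(&bo);

    /* the fix: teardown drops the same number before the final put */
    for (i = 0; i < num_planes; i++)
        put(&bo);
    put(&bo);

    assert(bo.refcount == 0);   /* balanced; previously one per plane leaked */
    return 0;
}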
@@ -225,7 +225,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
     *addr += mm_cur->start & ~PAGE_MASK;
 
     num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-    num_bytes = num_pages * 8;
+    num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
     r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
                      AMDGPU_IB_POOL_DELAYED, &job);
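The num_bytes change is easiest to see with concrete numbers: the GPU page size is 4K, so on a kernel built with a larger PAGE_SIZE each CPU page needs PAGE_SIZE / 4096 GPU PTEs, at 8 bytes per PTE. A standalone arithmetic check (64K pages assumed for the example):

#include <stdio.h>

int main(void)
{
    const unsigned long gpu_page = 4096;    /* AMDGPU_GPU_PAGE_SIZE */
    const unsigned long cpu_page = 65536;   /* assumed 64K PAGE_SIZE */
    const unsigned long gpu_pages_in_cpu_page = cpu_page / gpu_page; /* 16 */
    const unsigned long num_pages = 128;    /* CPU pages in the window */

    /* old vs. fixed sizing of the PTE staging buffer */
    printf("old: %lu bytes\n", num_pages * 8);                         /* 1024 */
    printf("new: %lu bytes\n", num_pages * 8 * gpu_pages_in_cpu_page); /* 16384 */
    return 0;
}

With 4K CPU pages the multiplier is 1 and the fix changes nothing, which is why the bug only showed up on non-4K-page configurations.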
@@ -1210,6 +1210,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
     if (gtt && gtt->userptr) {
         amdgpu_ttm_tt_set_user_pages(ttm, NULL);
         kfree(ttm->sg);
+        ttm->sg = NULL;
         ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
         return;
     }
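Clearing ttm->sg right after kfree() is the whole use-after-free fix: the pointer would otherwise dangle, and a later teardown path that inspects it would touch, or double-free, released memory. The generic pattern, runnable in isolation:

#include <stdlib.h>

struct tt { void *sg; };

static void unpopulate(struct tt *t)
{
    free(t->sg);
    t->sg = NULL;   /* the fix: leave no dangling pointer behind */
}

int main(void)
{
    struct tt t = { .sg = malloc(32) };

    unpopulate(&t);
    unpopulate(&t); /* now harmless: free(NULL) is a no-op */
    return 0;
}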
@@ -1395,9 +1395,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
-    SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1415,12 +1416,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
-    SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
 static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
@@ -4943,7 +4943,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
     amdgpu_gfx_rlc_enter_safe_mode(adev);
 
     /* Enable 3D CGCG/CGLS */
-    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+    if (enable) {
         /* write cmd to clear cgcg/cgls ov */
         def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
         /* unset CGCG override */
@@ -4955,8 +4955,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
         /* enable 3Dcgcg FSM(0x0000363f) */
         def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
 
-        data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
-            RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+            data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+                RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+        else
+            data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
             data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
@@ -198,8 +198,6 @@ static int jpeg_v2_5_hw_fini(void *handle)
         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
               RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
             jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-        ring->sched.ready = false;
     }
 
     return 0;
@@ -166,8 +166,6 @@ static int jpeg_v3_0_hw_fini(void *handle)
           RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
         jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-    ring->sched.ready = false;
-
     return 0;
 }
 
@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
 
 static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+    SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
     SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 };
 
@@ -497,11 +497,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
         WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
     }
-
-    sdma0->sched.ready = false;
-    sdma1->sched.ready = false;
-    sdma2->sched.ready = false;
-    sdma3->sched.ready = false;
 }
 
 /**
@@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
         *codecs = &rv_video_codecs_decode;
         return 0;
     case CHIP_ARCTURUS:
+    case CHIP_ALDEBARAN:
     case CHIP_RENOIR:
         if (encode)
             *codecs = &vega_video_codecs_encode;
@@ -1392,7 +1393,6 @@ static int soc15_common_early_init(void *handle)
         adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
             AMD_CG_SUPPORT_GFX_MGLS |
             AMD_CG_SUPPORT_GFX_CP_LS |
-            AMD_CG_SUPPORT_GFX_3D_CGCG |
             AMD_CG_SUPPORT_GFX_3D_CGLS |
             AMD_CG_SUPPORT_GFX_CGCG |
             AMD_CG_SUPPORT_GFX_CGLS |
@@ -1412,7 +1412,6 @@ static int soc15_common_early_init(void *handle)
             AMD_CG_SUPPORT_GFX_MGLS |
             AMD_CG_SUPPORT_GFX_RLC_LS |
             AMD_CG_SUPPORT_GFX_CP_LS |
-            AMD_CG_SUPPORT_GFX_3D_CGCG |
             AMD_CG_SUPPORT_GFX_3D_CGLS |
             AMD_CG_SUPPORT_GFX_CGCG |
             AMD_CG_SUPPORT_GFX_CGLS |
@@ -373,7 +373,7 @@ static int vcn_v3_0_hw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     struct amdgpu_ring *ring;
-    int i, j;
+    int i;
 
     for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
         if (adev->vcn.harvest_config & (1 << i))
@@ -388,12 +388,6 @@ static int vcn_v3_0_hw_fini(void *handle)
                 vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
             }
         }
-        ring->sched.ready = false;
-
-        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-            ring = &adev->vcn.inst[i].ring_enc[j];
-            ring->sched.ready = false;
-        }
     }
 
     return 0;
@@ -1076,6 +1076,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
             dc_is_dvi_signal(link->connector_signal)) {
             if (prev_sink)
                 dc_sink_release(prev_sink);
+            link_disconnect_sink(link);
+
+            return false;
+        }
+        /*
+         * Abort detection for DP connectors if we have
+         * no EDID and connector is active converter
+         * as there are no display downstream
+         *
+         */
+        if (dc_is_dp_sst_signal(link->connector_signal) &&
+            (link->dpcd_caps.dongle_type ==
+                    DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+            link->dpcd_caps.dongle_type ==
+                    DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+            if (prev_sink)
+                dc_sink_release(prev_sink);
+            link_disconnect_sink(link);
 
             return false;
         }
@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
             .fp16 = 16000
     },
 
+    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
     .max_downscale_factor = {
-            .argb8888 = 600,
-            .nv12 = 600,
-            .fp16 = 600
+            .argb8888 = 167,
+            .nv12 = 167,
+            .fp16 = 167
     }
 };
 
@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
             .fp16 = 16000
     },
 
+    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
     .max_downscale_factor = {
-            .argb8888 = 600,
-            .nv12 = 600,
-            .fp16 = 600
+            .argb8888 = 167,
+            .nv12 = 167,
+            .fp16 = 167
     },
     64,
     64
@@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = {
         .nv12 = 16000,
         .fp16 = 16000
     },
+    /* 6:1 downscaling ratio: 1000/6 = 166.666 */
     .max_downscale_factor = {
-        .argb8888 = 600,
-        .nv12 = 600,
-        .fp16 = 600
+        .argb8888 = 167,
+        .nv12 = 167,
+        .fp16 = 167
     },
     16,
     16
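The three hunks above apply the same correction to each DCN3.x resource file. As the added comment indicates, max_downscale_factor is expressed in thousandths of the minimum destination/source scale, so a 6:1 downscale limit is 1000/6 = 166.666, rounded to 167; the old value of 600 only permitted roughly 1.67:1. The arithmetic, checkable standalone:

#include <stdio.h>

int main(void)
{
    /* factor = 1000 * min(dst/src); a smaller factor allows deeper downscaling */
    printf("6:1 limit  -> 1000/6   = %.3f -> 167\n", 1000.0 / 6.0);
    printf("factor 600 -> max %.2f:1 downscale\n", 1000.0 / 600.0);
    return 0;
}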
@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
     ctx->addr = devm_ioremap_resource(dev, res);
-    if (IS_ERR(ctx->addr)) {
-        dev_err(dev, "ioremap failed\n");
+    if (IS_ERR(ctx->addr))
         return PTR_ERR(ctx->addr);
-    }
 
     ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
     if (ret < 0)
@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    dsi->reg_base = devm_ioremap_resource(dev, res);
-    if (IS_ERR(dsi->reg_base)) {
-        dev_err(dev, "failed to remap io region\n");
+    if (IS_ERR(dsi->reg_base))
         return PTR_ERR(dsi->reg_base);
-    }
 
     dsi->phy = devm_phy_get(dev, "dsim");
     if (IS_ERR(dsi->phy)) {
@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
 }
 
 /**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
  *
  * @ctx: local driver data
  * @win: window to protect registers for
@@ -102,7 +102,6 @@ config DRM_I915_GVT
     bool "Enable Intel GVT-g graphics virtualization host support"
     depends on DRM_I915
     depends on 64BIT
-    depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
     default n
     help
       Choose this option if you want to enable Intel GVT-g graphics
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
         i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
         GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
         i915_gem_object_set_tiling_quirk(obj);
+        GEM_BUG_ON(!list_empty(&obj->mm.link));
+        atomic_inc(&obj->mm.shrink_pin);
         shrinkable = false;
     }
 
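In the hunk above, the L-shape-quirked object's pages become permanently unshrinkable: the GEM_BUG_ON documents that the object sits on no shrink list, and bumping mm.shrink_pin keeps the shrinker accounting from ever re-adding it. That is also why the i915_gem_madvise_ioctl hunks further down guard the list move with a list_empty() check. A minimal model of the idea with generic names (not the i915 API):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
    atomic_int shrink_pin;  /* >0 means "never reclaim" */
    bool on_shrink_list;
};

static bool may_reclaim(struct obj *o)
{
    return o->on_shrink_list && atomic_load(&o->shrink_pin) == 0;
}

int main(void)
{
    struct obj quirked = { .shrink_pin = 1, .on_shrink_list = false };

    assert(!may_reclaim(&quirked));
    return 0;
}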
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
     gen7_emit_pipeline_invalidate(&cmds);
     batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
     batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
-    batch_add(&cmds, 0xffff0000);
+    batch_add(&cmds, 0xffff0000 |
+          ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+           HIZ_RAW_STALL_OPT_DISABLE :
+           0));
     batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
     batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
     gen7_emit_pipeline_invalidate(&cmds);
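The CACHE_MODE registers written above are masked registers on gen7: the high 16 bits of a write select which of the low 16 bits actually change, which is why the batch writes 0xffff0000 (touch all 16 control bits) OR'd with the single bit to set. A small standalone model of that write semantic (the bit position used for HIZ_RAW_STALL_OPT_DISABLE is assumed for the example, not taken from i915_reg.h):

#include <assert.h>
#include <stdint.h>

/* masked MMIO write: high half = which bits to update, low half = values */
static uint16_t masked_write(uint16_t old, uint32_t write)
{
    uint16_t mask = (uint16_t)(write >> 16);
    uint16_t val = (uint16_t)(write & 0xffff);

    return (uint16_t)((old & ~mask) | (val & mask));
}

int main(void)
{
    const uint32_t HIZ_RAW_STALL_OPT_DISABLE = 1u << 2; /* assumed bit */

    /* 0xffff0000 | bit: every control bit is written, only ours ends up set */
    assert(masked_write(0x1234, 0xffff0000 | HIZ_RAW_STALL_OPT_DISABLE) ==
           HIZ_RAW_STALL_OPT_DISABLE);
    return 0;
}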
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
     [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
 };
 
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
-    if (WARN_ON(type_group_id >= gvt->num_types))
-        return NULL;
-    return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
-                    struct mdev_type_attribute *attr,
-                    char *buf)
-{
-    struct intel_vgpu_type *type;
-    unsigned int num = 0;
-    void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-    type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
-    if (!type)
-        num = 0;
-    else
-        num = type->avail_instance;
-
-    return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
-                   struct mdev_type_attribute *attr, char *buf)
-{
-    return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
-                struct mdev_type_attribute *attr, char *buf)
-{
-    struct intel_vgpu_type *type;
-    void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-    type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
-    if (!type)
-        return 0;
-
-    return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-               "fence: %d\nresolution: %s\n"
-               "weight: %d\n",
-               BYTES_TO_MB(type->low_gm_size),
-               BYTES_TO_MB(type->high_gm_size),
-               type->fence, vgpu_edid_str(type->resolution),
-               type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
-    &mdev_type_attr_available_instances.attr,
-    &mdev_type_attr_device_api.attr,
-    &mdev_type_attr_description.attr,
-    NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
-    [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
-    *intel_vgpu_type_groups = gvt_vgpu_type_groups;
-    return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
-    int i, j;
-    struct intel_vgpu_type *type;
-    struct attribute_group *group;
-
-    for (i = 0; i < gvt->num_types; i++) {
-        type = &gvt->types[i];
-
-        group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
-        if (WARN_ON(!group))
-            goto unwind;
-
-        group->name = type->name;
-        group->attrs = gvt_type_attrs;
-        gvt_vgpu_type_groups[i] = group;
-    }
-
-    return 0;
-
-unwind:
-    for (j = 0; j < i; j++) {
-        group = gvt_vgpu_type_groups[j];
-        kfree(group);
-    }
-
-    return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
-    int i;
-    struct attribute_group *group;
-
-    for (i = 0; i < gvt->num_types; i++) {
-        group = gvt_vgpu_type_groups[i];
-        gvt_vgpu_type_groups[i] = NULL;
-        kfree(group);
-    }
-}
-
 static const struct intel_gvt_ops intel_gvt_ops = {
     .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
     .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
     .vgpu_reset = intel_gvt_reset_vgpu,
     .vgpu_activate = intel_gvt_activate_vgpu,
     .vgpu_deactivate = intel_gvt_deactivate_vgpu,
-    .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
-    .get_gvt_attrs = intel_get_gvt_attrs,
     .vgpu_query_plane = intel_vgpu_query_plane,
     .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
     .write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
         return;
 
     intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-    intel_gvt_cleanup_vgpu_type_groups(gvt);
     intel_gvt_clean_vgpu_types(gvt);
 
     intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
     if (ret)
         goto out_clean_thread;
 
-    ret = intel_gvt_init_vgpu_type_groups(gvt);
-    if (ret) {
-        gvt_err("failed to init vgpu type groups: %d\n", ret);
-        goto out_clean_types;
-    }
-
     vgpu = intel_gvt_create_idle_vgpu(gvt);
     if (IS_ERR(vgpu)) {
         ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
 void
 intel_gvt_unregister_hypervisor(void)
 {
-    intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+    void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+    intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
     module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
     void (*vgpu_reset)(struct intel_vgpu *);
     void (*vgpu_activate)(struct intel_vgpu *);
     void (*vgpu_deactivate)(struct intel_vgpu *);
-    struct intel_vgpu_type *(*gvt_find_vgpu_type)(
-        struct intel_gvt *gvt, unsigned int type_group_id);
-    bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
     int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
     int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
     int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -49,7 +49,7 @@ enum hypervisor_type {
 struct intel_gvt_mpt {
     enum hypervisor_type type;
     int (*host_init)(struct device *dev, void *gvt, const void *ops);
-    void (*host_exit)(struct device *dev);
+    void (*host_exit)(struct device *dev, void *gvt);
     int (*attach_vgpu)(void *vgpu, unsigned long *handle);
     void (*detach_vgpu)(void *vgpu);
     int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
     return !!(handle & ~0xff);
 }
 
+static ssize_t available_instances_show(struct mdev_type *mtype,
+                    struct mdev_type_attribute *attr,
+                    char *buf)
+{
+    struct intel_vgpu_type *type;
+    unsigned int num = 0;
+    struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+    type = &gvt->types[mtype_get_type_group_id(mtype)];
+    if (!type)
+        num = 0;
+    else
+        num = type->avail_instance;
+
+    return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+                   struct mdev_type_attribute *attr, char *buf)
+{
+    return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+                struct mdev_type_attribute *attr, char *buf)
+{
+    struct intel_vgpu_type *type;
+    struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+    type = &gvt->types[mtype_get_type_group_id(mtype)];
+    if (!type)
+        return 0;
+
+    return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+               "fence: %d\nresolution: %s\n"
+               "weight: %d\n",
+               BYTES_TO_MB(type->low_gm_size),
+               BYTES_TO_MB(type->high_gm_size),
+               type->fence, vgpu_edid_str(type->resolution),
+               type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+    &mdev_type_attr_available_instances.attr,
+    &mdev_type_attr_device_api.attr,
+    &mdev_type_attr_description.attr,
+    NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+    [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+    int i, j;
+    struct intel_vgpu_type *type;
+    struct attribute_group *group;
+
+    for (i = 0; i < gvt->num_types; i++) {
+        type = &gvt->types[i];
+
+        group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+        if (!group)
+            goto unwind;
+
+        group->name = type->name;
+        group->attrs = gvt_type_attrs;
+        gvt_vgpu_type_groups[i] = group;
+    }
+
+    return 0;
+
+unwind:
+    for (j = 0; j < i; j++) {
+        group = gvt_vgpu_type_groups[j];
+        kfree(group);
+    }
+
+    return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+    int i;
+    struct attribute_group *group;
+
+    for (i = 0; i < gvt->num_types; i++) {
+        group = gvt_vgpu_type_groups[i];
+        gvt_vgpu_type_groups[i] = NULL;
+        kfree(group);
+    }
+}
+
 static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
     struct intel_vgpu *vgpu = NULL;
     struct intel_vgpu_type *type;
     struct device *pdev;
-    void *gvt;
+    struct intel_gvt *gvt;
     int ret;
 
     pdev = mdev_parent_dev(mdev);
     gvt = kdev_to_i915(pdev)->gvt;
 
-    type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
-            mdev_get_type_group_id(mdev));
+    type = &gvt->types[mdev_get_type_group_id(mdev)];
     if (!type) {
         ret = -EINVAL;
         goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
 
 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 {
-    struct attribute_group **kvm_vgpu_type_groups;
+    int ret;
+
+    ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+    if (ret)
+        return ret;
 
     intel_gvt_ops = ops;
-    if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
-        return -EFAULT;
-    intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+    intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
 
-    return mdev_register_device(dev, &intel_vgpu_ops);
+    ret = mdev_register_device(dev, &intel_vgpu_ops);
+    if (ret)
+        intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+    return ret;
 }
 
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
 {
     mdev_unregister_device(dev);
+    intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
 }
 
 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
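The kvmgt changes above restore init/exit symmetry: the mdev type-group array is now allocated inside kvmgt_host_init() and freed in kvmgt_host_exit(), which is why host_exit() has to receive the same gvt context that host_init() was given. A generic runnable model of that callback-pair shape (names are illustrative, not the kvmgt API):

#include <stdlib.h>

struct host_ops {
    int (*host_init)(void **ctx);
    void (*host_exit)(void *ctx);   /* exit now sees init's context */
};

static int my_init(void **ctx)
{
    *ctx = malloc(16);              /* whatever init allocates... */
    return *ctx ? 0 : -1;
}

static void my_exit(void *ctx)
{
    free(ctx);                      /* ...exit can now tear down */
}

int main(void)
{
    const struct host_ops ops = { my_init, my_exit };
    void *ctx;

    if (!ops.host_init(&ctx))
        ops.host_exit(ctx);
    return 0;
}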
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
 /**
  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
  */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
 {
     /* optional to provide */
     if (!intel_gvt_host.mpt->host_exit)
         return;
 
-    intel_gvt_host.mpt->host_exit(dev);
+    intel_gvt_host.mpt->host_exit(dev, gvt);
 }
 
 /**
@@ -999,12 +999,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
     obj->mm.madv = args->madv;
 
     if (i915_gem_object_has_pages(obj)) {
-        struct list_head *list;
-
-        if (i915_gem_object_is_shrinkable(obj)) {
         unsigned long flags;
 
         spin_lock_irqsave(&i915->mm.obj_lock, flags);
+        if (!list_empty(&obj->mm.link)) {
+            struct list_head *list;
 
             if (obj->mm.madv != I915_MADV_WILLNEED)
                 list = &i915->mm.purge_list;
@@ -1012,8 +1011,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                 list = &i915->mm.shrink_list;
             list_move_tail(&obj->mm.link, list);
 
-            spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
         }
+        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
     }
 
     /* if the object is no longer attached, discard its backing storage */
@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
     p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
     for (i = 0; i < pages; i++, p++) {
-        rdev->gart.pages[p] = pagelist[i];
+        rdev->gart.pages[p] = pagelist ? pagelist[i] :
+            rdev->dummy_page.page;
         page_base = dma_addr[i];
         for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
             page_entry = radeon_gart_get_page_entry(page_base, flags);
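For the radeon hunk above: buffers imported through PRIME supply DMA addresses but no CPU page array, so pagelist is NULL; recording the driver's dummy page in the GART bookkeeping keeps later unbind and lookup paths away from a NULL dereference. The fallback pattern in isolation:

#include <stdio.h>

static const char dummy_page[] = "dummy";

/* imported BOs: dma addresses yes, struct pages no */
static const char *gart_page(const char **pagelist, int i)
{
    return pagelist ? pagelist[i] : dummy_page;
}

int main(void)
{
    printf("%s\n", gart_page(NULL, 0)); /* falls back to the dummy page */
    return 0;
}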