Merge tag 'drm-misc-next-2022-11-03' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 6.2:

UAPI Changes:

Cross-subsystem Changes:
- dma-buf: locking improvements
- firmware: New API in the RaspberryPi firmware driver used by vc4

Core Changes:
- client: Null pointer dereference fix in drm_client_buffer_delete()
- mm/buddy: Add back random seed log
- ttm: Convert ttm_resource to use size_t for its size, fix for an undefined behaviour

Driver Changes:
- bridge:
  - adv7511: use dev_err_probe
  - it6505: Fix return value check of pm_runtime_get_sync
- panel:
  - sitronix: Fixes and clean-ups
- lcdif: Increase DMA burst size
- rockchip: runtime_pm improvements
- vc4: Fix for a regression preventing the use of 4k @ 60Hz, and further HDMI rate constraints check.
- vmwgfx: Cursor improvements

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221103083437.ksrh3hcdvxaof62l@houat
commit 441f0ec0ae
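The "firmware: New API in the RaspberryPi firmware driver used by vc4" entry refers to the rpi_firmware_find_node()/rpi_firmware_clk_get_max_rate() helpers added in the hunks below. A minimal, hypothetical consumer sketch, mirroring what vc4_hvs_bind() does further down (error handling trimmed, function name invented for illustration):

#include <linux/of.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

/* Query the firmware for the maximum core clock rate, as vc4 does at bind time. */
static unsigned int example_get_max_core_rate(void)
{
	struct device_node *node;
	struct rpi_firmware *fw;
	unsigned int max_rate;

	node = rpi_firmware_find_node();
	if (!node)
		return 0;

	fw = rpi_firmware_get(node);
	of_node_put(node);
	if (!fw)
		return 0;	/* a real driver would typically return -EPROBE_DEFER here */

	max_rate = rpi_firmware_clk_get_max_rate(fw, RPI_FIRMWARE_CORE_CLK_ID);
	rpi_firmware_put(fw);

	return max_rate;
}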
@@ -18,25 +18,6 @@
 #include <soc/bcm2835/raspberrypi-firmware.h>

-enum rpi_firmware_clk_id {
-RPI_FIRMWARE_EMMC_CLK_ID = 1,
-RPI_FIRMWARE_UART_CLK_ID,
-RPI_FIRMWARE_ARM_CLK_ID,
-RPI_FIRMWARE_CORE_CLK_ID,
-RPI_FIRMWARE_V3D_CLK_ID,
-RPI_FIRMWARE_H264_CLK_ID,
-RPI_FIRMWARE_ISP_CLK_ID,
-RPI_FIRMWARE_SDRAM_CLK_ID,
-RPI_FIRMWARE_PIXEL_CLK_ID,
-RPI_FIRMWARE_PWM_CLK_ID,
-RPI_FIRMWARE_HEVC_CLK_ID,
-RPI_FIRMWARE_EMMC2_CLK_ID,
-RPI_FIRMWARE_M2MC_CLK_ID,
-RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
-RPI_FIRMWARE_VEC_CLK_ID,
-RPI_FIRMWARE_NUM_CLK_ID,
-};
-
 static char *rpi_firmware_clk_names[] = {
 [RPI_FIRMWARE_EMMC_CLK_ID] = "emmc",
 [RPI_FIRMWARE_UART_CLK_ID] = "uart",

@@ -995,10 +995,10 @@ static void __unmap_dma_buf(struct dma_buf_attachment *attach,
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-if (WARN_ON(!dmabuf || !attach))
+if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
 return;

-dma_resv_lock(attach->dmabuf->resv, NULL);
+dma_resv_lock(dmabuf->resv, NULL);

 if (attach->sgt) {
@@ -228,6 +228,26 @@ static void rpi_register_clk_driver(struct device *dev)
 -1, NULL, 0);
 }

+unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw, unsigned int id)
+{
+struct rpi_firmware_clk_rate_request msg =
+RPI_FIRMWARE_CLK_RATE_REQUEST(id);
+int ret;
+
+ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_MAX_CLOCK_RATE,
+&msg, sizeof(msg));
+if (ret)
+/*
+ * If our firmware doesn't support that operation, or fails, we
+ * assume the maximum clock rate is absolute maximum we can
+ * store over our type.
+ */
+return UINT_MAX;
+
+return le32_to_cpu(msg.rate);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_clk_get_max_rate);
+
 static void rpi_firmware_delete(struct kref *kref)
 {
 struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,

@@ -311,6 +331,18 @@ static int rpi_firmware_remove(struct platform_device *pdev)
 return 0;
 }

+static const struct of_device_id rpi_firmware_of_match[] = {
+{ .compatible = "raspberrypi,bcm2835-firmware", },
+{},
+};
+MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
+
+struct device_node *rpi_firmware_find_node(void)
+{
+return of_find_matching_node(NULL, rpi_firmware_of_match);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_find_node);
+
 /**
  * rpi_firmware_get - Get pointer to rpi_firmware structure.
  * @firmware_node: Pointer to the firmware Device Tree node.

@@ -366,12 +398,6 @@ struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);

-static const struct of_device_id rpi_firmware_of_match[] = {
-{ .compatible = "raspberrypi,bcm2835-firmware", },
-{},
-};
-MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
-
 static struct platform_driver rpi_firmware_driver = {
 .driver = {
 .name = "raspberrypi-firmware",
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 node->base.start = node->mm_nodes[0].start;
 } else {
 node->mm_nodes[0].start = 0;
-node->mm_nodes[0].size = node->base.num_pages;
+node->mm_nodes[0].size = PFN_UP(node->base.size);
 node->base.start = AMDGPU_BO_INVALID_OFFSET;
 }

@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 /* GWS and OA don't need any alignment. */
 page_align = bp->byte_align;
 size <<= PAGE_SHIFT;

 } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 /* Both size and alignment must be a multiple of 4. */
 page_align = ALIGN(bp->byte_align, 4);

@@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 return 0;
 }

-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 if (r)
 return r;

@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 if (!res)
 goto fallback;

-BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+BUG_ON(start + size > res->size);

 cur->mem_type = res->mem_type;

@@ -110,7 +110,7 @@ fallback:
 cur->size = size;
 cur->remaining = size;
 cur->node = NULL;
-WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+WARN_ON(res && start + size > res->size);
 return;
 }

@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,

 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.resource->num_pages;
+__entry->pages = PFN_UP(bo->tbo.resource->size);
 __entry->type = bo->tbo.resource->mem_type;
 __entry->prefer = bo->preferred_domains;
 __entry->allow = bo->allowed_domains;
@@ -381,7 +381,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 dst.offset = 0;

 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
-new_mem->num_pages << PAGE_SHIFT,
+new_mem->size,
 amdgpu_bo_encrypted(abo),
 bo->base.resv, &fence);
 if (r)

@@ -424,7 +424,7 @@ error:
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 struct ttm_resource *mem)
 {
-u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+u64 mem_size = (u64)mem->size;
 struct amdgpu_res_cursor cursor;
 u64 end;

@@ -571,7 +571,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 struct ttm_resource *mem)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+size_t bus_size = (size_t)mem->size;

 switch (mem->mem_type) {
 case TTM_PL_SYSTEM:

@@ -439,7 +439,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 /* Allocate blocks in desired range */
 vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

-remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+remaining_size = (u64)vres->base.size;

 mutex_lock(&mgr->lock);
 while (remaining_size) {

@@ -498,7 +498,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 LIST_HEAD(temp);

 trim_list = &vres->blocks;
-original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+original_size = (u64)vres->base.size;

 /*
 * If size value is rounded up to min_block_size, trim the last

@@ -533,8 +533,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 amdgpu_vram_mgr_block_size(block);
 start >>= PAGE_SHIFT;

-if (start > vres->base.num_pages)
-start -= vres->base.num_pages;
+if (start > PFN_UP(vres->base.size))
+start -= PFN_UP(vres->base.size);
 else
 start = 0;
 vres->base.start = max(vres->base.start, start);
@@ -1219,10 +1219,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 return ret;

 ret = adv7511_init_regulators(adv7511);
-if (ret) {
-dev_err(dev, "failed to init regulators\n");
-return ret;
-}
+if (ret)
+return dev_err_probe(dev, ret, "failed to init regulators\n");

 /*
 * The power down GPIO is optional. If present, toggle it from active to

@@ -149,16 +149,14 @@ int adv7533_attach_dsi(struct adv7511 *adv)
 };

 host = of_find_mipi_dsi_host_by_node(adv->host_node);
-if (!host) {
-dev_err(dev, "failed to find dsi host\n");
-return -EPROBE_DEFER;
-}
+if (!host)
+return dev_err_probe(dev, -EPROBE_DEFER,
+"failed to find dsi host\n");

 dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
-if (IS_ERR(dsi)) {
-dev_err(dev, "failed to create dsi device\n");
-return PTR_ERR(dsi);
-}
+if (IS_ERR(dsi))
+return dev_err_probe(dev, PTR_ERR(dsi),
+"failed to create dsi device\n");

 adv->dsi = dsi;

@@ -168,10 +166,8 @@ int adv7533_attach_dsi(struct adv7511 *adv)
 MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;

 ret = devm_mipi_dsi_attach(dev, dsi);
-if (ret < 0) {
-dev_err(dev, "failed to attach dsi to host\n");
-return ret;
-}
+if (ret < 0)
+return dev_err_probe(dev, ret, "failed to attach dsi to host\n");

 return 0;
 }
@@ -2699,7 +2699,7 @@ static void it6505_extcon_work(struct work_struct *work)
 * pm_runtime_force_resume re-enables runtime power management.
 * Handling the error here to make sure the bridge is powered on.
 */
-if (ret)
+if (ret < 0)
 it6505_poweron(it6505);

 complete_all(&it6505->extcon_completion);

@@ -235,10 +235,10 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
 {
 struct drm_device *dev = buffer->client->dev;

-drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
-
-if (buffer->gem)
+if (buffer->gem) {
+drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
 drm_gem_object_put(buffer->gem);
+}

 if (buffer->handle)
 drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
@@ -649,7 +649,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res)
 if (!i915_ttm_cpu_maps_iomem(res))
 return true;

-return bman_res->used_visible_size == bman_res->base.num_pages;
+return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
 }

 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)

@@ -158,7 +158,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 u32 page_alignment)
 {
 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-const u64 size = res->num_pages << PAGE_SHIFT;
+const u64 size = res->size;
 const u32 max_segment = round_down(UINT_MAX, page_alignment);
 struct drm_buddy *mm = bman_res->mm;
 struct list_head *blocks = &bman_res->blocks;

@@ -177,7 +177,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,

 i915_refct_sgt_init(rsgt, size);
 st = &rsgt->table;
-if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
 i915_refct_sgt_put(rsgt);
 return ERR_PTR(-ENOMEM);
 }

@@ -62,8 +62,8 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 if (place->fpfn || lpfn != man->size)
 bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

-GEM_BUG_ON(!bman_res->base.num_pages);
-size = bman_res->base.num_pages << PAGE_SHIFT;
+GEM_BUG_ON(!bman_res->base.size);
+size = bman_res->base.size;

 min_page_size = bman->default_page_size;
 if (bo->page_alignment)

@@ -72,7 +72,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 GEM_BUG_ON(min_page_size < mm->chunk_size);
 GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));

-if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
 place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 unsigned long pages;

@@ -108,7 +108,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 goto err_free_blocks;

 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+u64 original_size = (u64)bman_res->base.size;

 drm_buddy_block_trim(mm,
 original_size,

@@ -116,7 +116,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 }

 if (lpfn <= bman->visible_size) {
-bman_res->used_visible_size = bman_res->base.num_pages;
+bman_res->used_visible_size = PFN_UP(bman_res->base.size);
 } else {
 struct drm_buddy_block *block;

@@ -228,7 +228,7 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,

 if (!place->fpfn &&
 place->lpfn == i915_ttm_buddy_man_visible_size(man))
-return bman_res->used_visible_size == res->num_pages;
+return bman_res->used_visible_size == PFN_UP(res->size);

 /* Check each drm buddy block individually */
 list_for_each_entry(block, &bman_res->blocks, link) {
@@ -244,7 +244,7 @@ void intel_region_ttm_resource_free(struct intel_memory_region *mem,
 struct ttm_resource_manager *man = mem->region_private;
 struct ttm_buffer_object mock_bo = {};

-mock_bo.base.size = res->num_pages << PAGE_SHIFT;
+mock_bo.base.size = res->size;
 mock_bo.bdev = &mem->i915->bdev;
 res->bo = &mock_bo;

@@ -314,8 +314,18 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
 CTRLDESCL0_1_WIDTH(m->hdisplay),
 lcdif->base + LCDC_V8_CTRLDESCL0_1);

-writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
-lcdif->base + LCDC_V8_CTRLDESCL0_3);
+/*
+ * Undocumented P_SIZE and T_SIZE register but those written in the
+ * downstream kernel those registers control the AXI burst size. As of
+ * now there are two known values:
+ * 1 - 128Byte
+ * 2 - 256Byte
+ * Downstream set it to 256B burst size to improve the memory
+ * efficiency so set it here too.
+ */
+ctrl = CTRLDESCL0_3_P_SIZE(2) | CTRLDESCL0_3_T_SIZE(2) |
+CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]);
+writel(ctrl, lcdif->base + LCDC_V8_CTRLDESCL0_3);
 }

 static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)

@@ -190,6 +190,10 @@
 #define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
 #define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)

+#define CTRLDESCL0_3_P_SIZE(n) (((n) << 20) & CTRLDESCL0_3_P_SIZE_MASK)
+#define CTRLDESCL0_3_P_SIZE_MASK GENMASK(22, 20)
+#define CTRLDESCL0_3_T_SIZE(n) (((n) << 16) & CTRLDESCL0_3_T_SIZE_MASK)
+#define CTRLDESCL0_3_T_SIZE_MASK GENMASK(17, 16)
 #define CTRLDESCL0_3_PITCH(n) ((n) & 0xffff)
 #define CTRLDESCL0_3_PITCH_MASK GENMASK(15, 0)
@@ -532,7 +532,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 if (ret)
 return ret;

-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
+ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);

 ttm_bo_unreserve(&nvbo->bo);
 return ret;

@@ -1236,7 +1236,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 } else {
 /* make sure bo is in mappable vram */
 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-bo->resource->start + bo->resource->num_pages < mappable)
+bo->resource->start + PFN_UP(bo->resource->size) < mappable)
 return 0;

 for (i = 0; i < nvbo->placement.num_placement; ++i) {

@@ -52,7 +52,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 u32 src_offset = old_reg->start << PAGE_SHIFT;
 u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
 u32 dst_offset = new_reg->start << PAGE_SHIFT;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;

 ret = PUSH_WAIT(push, 3);

@@ -62,7 +62,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
 SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);

-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 2047) ? 2047 : page_count;

@@ -41,7 +41,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 {
 struct nouveau_mem *mem = nouveau_mem(old_reg);
 struct nvif_push *push = chan->chan.push;
-u64 length = (new_reg->num_pages << PAGE_SHIFT);
+u64 length = new_reg->size;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
 int src_tiled = !!mem->kind;

@@ -44,7 +44,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 if (ret)
 return ret;

-PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
 0x0308, upper_32_bits(mem->vma[0].addr),
 0x030c, lower_32_bits(mem->vma[0].addr),
 0x0310, upper_32_bits(mem->vma[1].addr),

@@ -44,10 +44,10 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nvif_push *push = chan->chan.push;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;

-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 8191) ? 8191 : page_count;

@@ -42,10 +42,10 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nouveau_mem *mem = nouveau_mem(old_reg);
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;

-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 2047) ? 2047 : page_count;

@@ -37,10 +37,10 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nvif_push *push = chan->chan.push;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;

-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 8191) ? 8191 : page_count;

@@ -58,7 +58,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 PITCH_IN, PAGE_SIZE,
 PITCH_OUT, PAGE_SIZE,
 LINE_LENGTH_IN, PAGE_SIZE,
-LINE_COUNT, new_reg->num_pages);
+LINE_COUNT, PFN_UP(new_reg->size));

 PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
 NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
@@ -679,7 +679,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 }

 if (!nvbo->kmap.virtual) {
-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
+ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 &nvbo->kmap);
 if (ret) {
 NV_PRINTK(err, cli, "failed kmap for reloc\n");

@@ -868,8 +868,7 @@ revalidate:
 if (unlikely(cmd != req->suffix0)) {
 if (!nvbo->kmap.virtual) {
 ret = ttm_bo_kmap(&nvbo->bo, 0,
-nvbo->bo.resource->
-num_pages,
+PFN_UP(nvbo->bo.base.size),
 &nvbo->kmap);
 if (ret) {
 WIND_RING(chan);

@@ -115,7 +115,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)

 mutex_lock(&drm->master.lock);
 ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
-reg->num_pages << PAGE_SHIFT,
+reg->size,
 &args, sizeof(args), &mem->mem);
 mutex_unlock(&drm->master.lock);
 return ret;

@@ -128,7 +128,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 struct nouveau_cli *cli = mem->cli;
 struct nouveau_drm *drm = cli->drm;
 struct nvif_mmu *mmu = &cli->mmu;
-u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+u64 size = ALIGN(reg->size, 1 << page);
 int ret;

 mutex_lock(&drm->master.lock);

@@ -139,7 +139,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
 mem = nouveau_mem(*res);
 ttm_resource_init(bo, place, *res);
 ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-(long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
+(long)(*res)->size, &mem->vma[0]);
 if (ret) {
 nouveau_mem_del(man, *res);
 return ret;
@@ -19,6 +19,9 @@

 /* Command2 BKx selection command */
 #define DSI_CMD2BKX_SEL 0xFF
+#define DSI_CMD1 0
+#define DSI_CMD2 BIT(4)
+#define DSI_CMD2BK_MASK GENMASK(3, 0)

 /* Command2, BK0 commands */
 #define DSI_CMD2_BK0_PVGAMCTRL 0xB0 /* Positive Voltage Gamma Control */

@@ -39,21 +42,6 @@
 #define DSI_CMD2_BK1_SPD2 0xC2 /* Source EQ2 Setting */
 #define DSI_CMD2_BK1_MIPISET1 0xD0 /* MIPI Setting 1 */

-/*
- * Command2 with BK function selection.
- *
- * BIT[4].....CN2
- * BIT[1:0]...BKXSEL
- * 1:00 = CMD2BK0, Command2 BK0
- * 1:01 = CMD2BK1, Command2 BK1
- * 1:11 = CMD2BK3, Command2 BK3
- * 0:00 = Command2 disable
- */
-#define DSI_CMD2BK0_SEL 0x10
-#define DSI_CMD2BK1_SEL 0x11
-#define DSI_CMD2BK3_SEL 0x13
-#define DSI_CMD2BKX_SEL_NONE 0x00
-
 /* Command2, BK0 bytes */
 #define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
 #define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
@@ -191,6 +179,18 @@ static u8 st7701_vgls_map(struct st7701 *st7701)
 return 0;
 }

+static void st7701_switch_cmd_bkx(struct st7701 *st7701, bool cmd2, u8 bkx)
+{
+u8 val;
+
+if (cmd2)
+val = DSI_CMD2 | FIELD_PREP(DSI_CMD2BK_MASK, bkx);
+else
+val = DSI_CMD1;
+
+ST7701_DSI(st7701, DSI_CMD2BKX_SEL, 0x77, 0x01, 0x00, 0x00, val);
+}
+
 static void st7701_init_sequence(struct st7701 *st7701)
 {
 const struct st7701_panel_desc *desc = st7701->desc;

@@ -208,8 +208,8 @@ static void st7701_init_sequence(struct st7701 *st7701)
 msleep(st7701->sleep_delay);

 /* Command2, BK0 */
-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BK0_SEL);
+st7701_switch_cmd_bkx(st7701, true, 0);

 mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
 desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
 mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,

@@ -244,11 +244,10 @@ static void st7701_init_sequence(struct st7701 *st7701)
 DSI_CMD2_BK0_INVSEL_ONES_MASK |
 FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
 FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
-DIV_ROUND_UP(mode->htotal, 16)));
+(clamp((u32)mode->htotal, 512U, 1008U) - 512) / 16));

 /* Command2, BK1 */
-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BK1_SEL);
+st7701_switch_cmd_bkx(st7701, true, 1);

 /* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
 ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,

@@ -373,33 +372,27 @@ static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
 0x08, 0x08, 0x08, 0x40,
 0x3F, 0x64);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+st7701_switch_cmd_bkx(st7701, false, 0);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+st7701_switch_cmd_bkx(st7701, true, 3);
 ST7701_DSI(st7701, 0xE6, 0x7C);
 ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+st7701_switch_cmd_bkx(st7701, false, 0);
 ST7701_DSI(st7701, 0x11);
 msleep(120);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+st7701_switch_cmd_bkx(st7701, true, 3);
 ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
 msleep(10);
 ST7701_DSI(st7701, 0xE8, 0x00, 0x00);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+st7701_switch_cmd_bkx(st7701, false, 0);
 ST7701_DSI(st7701, 0x11);
 msleep(120);
 ST7701_DSI(st7701, 0xE8, 0x00, 0x00);

-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+st7701_switch_cmd_bkx(st7701, false, 0);

 ST7701_DSI(st7701, 0x3A, 0x70);
 }

@@ -426,8 +419,7 @@ static int st7701_prepare(struct drm_panel *panel)
 st7701->desc->gip_sequence(st7701);

 /* Disable Command2 */
-ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
-0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+st7701_switch_cmd_bkx(st7701, false, 0);

 return 0;
 }

@@ -762,7 +754,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
 st7701->dsi = dsi;
 st7701->desc = desc;

-return mipi_dsi_attach(dsi);
+ret = mipi_dsi_attach(dsi);
+if (ret)
+goto err_attach;
+
+return 0;
+
+err_attach:
+drm_panel_remove(&st7701->panel);
+return ret;
 }

 static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
@@ -400,8 +400,11 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

 /* Sort A before B if A is smaller. */
-return (int)la->robj->tbo.resource->num_pages -
-(int)lb->robj->tbo.resource->num_pages;
+if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
+return 1;
+if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
+return -1;
+return 0;
 }

 /**

@@ -232,7 +232,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 }
 return 0;
 }
-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 if (r) {
 return r;
 }

@@ -737,7 +737,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 if (bo->resource->mem_type != TTM_PL_VRAM)
 return 0;

-size = bo->resource->num_pages << PAGE_SHIFT;
+size = bo->resource->size;
 offset = bo->resource->start << PAGE_SHIFT;
 if ((offset + size) <= rdev->mc.visible_vram_size)
 return 0;

@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,

 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.resource->num_pages;
+__entry->pages = PFN_UP(bo->tbo.resource->size);
 ),
 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );

@@ -181,7 +181,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,

 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

-num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
 if (IS_ERR(fence))
 return PTR_ERR(fence);

@@ -268,7 +268,7 @@ out:
 static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 struct radeon_device *rdev = radeon_get_rdev(bdev);
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+size_t bus_size = (size_t)mem->size;

 switch (mem->mem_type) {
 case TTM_PL_SYSTEM:
@@ -364,12 +364,6 @@ static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
 return readl(dsi->base + reg);
 }

-static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
-u32 mask, u32 val)
-{
-dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
-}
-
 static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
 u8 test_code,
 u8 test_data)

@@ -1213,7 +1207,7 @@ static int dw_mipi_dsi_dphy_power_on(struct phy *phy)
 return i;
 }

-ret = pm_runtime_get_sync(dsi->dev);
+ret = pm_runtime_resume_and_get(dsi->dev);
 if (ret < 0) {
 DRM_DEV_ERROR(dsi->dev, "failed to enable device: %d\n", ret);
 return ret;

@@ -25,35 +25,6 @@ static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
 .dirty = drm_atomic_helper_dirtyfb,
 };

-static struct drm_framebuffer *
-rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
-struct drm_gem_object **obj, unsigned int num_planes)
-{
-struct drm_framebuffer *fb;
-int ret;
-int i;
-
-fb = kzalloc(sizeof(*fb), GFP_KERNEL);
-if (!fb)
-return ERR_PTR(-ENOMEM);
-
-drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
-
-for (i = 0; i < num_planes; i++)
-fb->obj[i] = obj[i];
-
-ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
-if (ret) {
-DRM_DEV_ERROR(dev->dev,
-"Failed to initialize framebuffer: %d\n",
-ret);
-kfree(fb);
-return ERR_PTR(ret);
-}
-
-return fb;
-}
-
 static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };

@@ -106,20 +77,6 @@ static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
 .atomic_commit = drm_atomic_helper_commit,
 };

-struct drm_framebuffer *
-rockchip_drm_framebuffer_init(struct drm_device *dev,
-const struct drm_mode_fb_cmd2 *mode_cmd,
-struct drm_gem_object *obj)
-{
-struct drm_framebuffer *fb;
-
-fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
-if (IS_ERR(fb))
-return ERR_CAST(fb);
-
-return fb;
-}
-
 void rockchip_drm_mode_config_init(struct drm_device *dev)
 {
 dev->mode_config.min_width = 0;

@@ -7,11 +7,5 @@
 #ifndef _ROCKCHIP_DRM_FB_H
 #define _ROCKCHIP_DRM_FB_H

-struct drm_framebuffer *
-rockchip_drm_framebuffer_init(struct drm_device *dev,
-const struct drm_mode_fb_cmd2 *mode_cmd,
-struct drm_gem_object *obj);
-void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
-
 void rockchip_drm_mode_config_init(struct drm_device *dev);
 #endif /* _ROCKCHIP_DRM_FB_H */
@@ -602,7 +602,7 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
 struct vop *vop = to_vop(crtc);
 int ret, i;

-ret = pm_runtime_get_sync(vop->dev);
+ret = pm_runtime_resume_and_get(vop->dev);
 if (ret < 0) {
 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
 return ret;

@@ -1983,7 +1983,7 @@ static int vop_initial(struct vop *vop)
 return PTR_ERR(vop->dclk);
 }

-ret = pm_runtime_get_sync(vop->dev);
+ret = pm_runtime_resume_and_get(vop->dev);
 if (ret < 0) {
 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
 return ret;

@@ -822,7 +822,7 @@ static void vop2_enable(struct vop2 *vop2)
 {
 int ret;

-ret = pm_runtime_get_sync(vop2->dev);
+ret = pm_runtime_resume_and_get(vop2->dev);
 if (ret < 0) {
 drm_err(vop2->drm, "failed to get pm runtime: %d\n", ret);
 return;

@@ -152,7 +152,7 @@ static int rk3288_lvds_poweron(struct rockchip_lvds *lvds)
 DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
 return ret;
 }
-ret = pm_runtime_get_sync(lvds->dev);
+ret = pm_runtime_resume_and_get(lvds->dev);
 if (ret < 0) {
 DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
 clk_disable(lvds->pclk);

@@ -336,16 +336,20 @@ static int px30_lvds_poweron(struct rockchip_lvds *lvds)
 {
 int ret;

-ret = pm_runtime_get_sync(lvds->dev);
+ret = pm_runtime_resume_and_get(lvds->dev);
 if (ret < 0) {
 DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
 return ret;
 }

 /* Enable LVDS mode */
-return regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
+ret = regmap_update_bits(lvds->grf, PX30_LVDS_GRF_PD_VO_CON1,
 PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1),
 PX30_LVDS_MODE_EN(1) | PX30_LVDS_P2S_EN(1));
+if (ret)
+pm_runtime_put(lvds->dev);
+
+return ret;
 }

 static void px30_lvds_poweroff(struct rockchip_lvds *lvds)

@@ -188,7 +188,7 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
 .base = 0x1800,
 .layer_sel_id = 2,
 .supported_rotations = DRM_MODE_REFLECT_Y,
-.type = DRM_PLANE_TYPE_OVERLAY,
+.type = DRM_PLANE_TYPE_PRIMARY,
 .max_upscale_factor = 8,
 .max_downscale_factor = 8,
 .dly = { 20, 47, 41 },
@@ -726,11 +726,13 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 drm_buddy_fini(&mm);
 }

-static int drm_buddy_init_test(struct kunit *test)
+static int drm_buddy_suite_init(struct kunit_suite *suite)
 {
 while (!random_seed)
 random_seed = get_random_u32();

+kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);
+
 return 0;
 }

@@ -746,7 +748,7 @@ static struct kunit_case drm_buddy_tests[] = {

 static struct kunit_suite drm_buddy_test_suite = {
 .name = "drm_buddy",
-.init = drm_buddy_init_test,
+.suite_init = drm_buddy_suite_init,
 .test_cases = drm_buddy_tests,
 };

@@ -2209,11 +2209,15 @@ err_nodes:
 vfree(nodes);
 }

-static int drm_mm_init_test(struct kunit *test)
+static int drm_mm_suite_init(struct kunit_suite *suite)
 {
 while (!random_seed)
 random_seed = get_random_u32();

+kunit_info(suite,
+"Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
+random_seed, max_iterations, max_prime);
+
 return 0;
 }

@@ -2246,7 +2250,7 @@ static struct kunit_case drm_mm_tests[] = {

 static struct kunit_suite drm_mm_test_suite = {
 .name = "drm_mm",
-.init = drm_mm_init_test,
+.suite_init = drm_mm_suite_init,
 .test_cases = drm_mm_tests,
 };
@@ -51,9 +51,6 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 struct ttm_resource_manager *man;
 int i, mem_type;

-drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-bo, bo->resource->num_pages, bo->base.size >> 10,
-bo->base.size >> 20);
 for (i = 0; i < placement->num_placement; i++) {
 mem_type = placement->placement[i].mem_type;
 drm_printf(&p, " placement[%d]=0x%08X (%d)\n",

@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,

 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);

 if (!src_iter->ops->maps_tt)
 ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);

@@ -357,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,

 map->virtual = NULL;
 map->bo = bo;
-if (num_pages > bo->resource->num_pages)
+if (num_pages > PFN_UP(bo->resource->size))
 return -EINVAL;
-if ((start_page + num_pages) > bo->resource->num_pages)
+if ((start_page + num_pages) > PFN_UP(bo->resource->size))
 return -EINVAL;

 ret = ttm_mem_io_reserve(bo->bdev, bo->resource);

@@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 page_last = vma_pages(vma) + vma->vm_pgoff -
 drm_vma_node_start(&bo->base.vma_node);

-if (unlikely(page_offset >= bo->resource->num_pages))
+if (unlikely(page_offset >= PFN_UP(bo->base.size)))
 return VM_FAULT_SIGBUS;

 prot = ttm_io_prot(bo, bo->resource, prot);

@@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 << PAGE_SHIFT);
 int ret;

-if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+if (len < 1 || (offset + len) > bo->base.size)
 return -EIO;

 ret = ttm_bo_reserve(bo, true, false, NULL);

@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,

 spin_lock(&rman->lock);
 ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-node->base.num_pages,
+PFN_UP(node->base.size),
 bo->page_alignment, 0,
 place->fpfn, lpfn, mode);
 spin_unlock(&rman->lock);

@@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 struct ttm_resource_manager *man;

 res->start = 0;
-res->num_pages = PFN_UP(bo->base.size);
+res->size = bo->base.size;
 res->mem_type = place->mem_type;
 res->placement = place->flags;
 res->bus.addr = NULL;

@@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 list_add_tail(&res->lru, &bo->bdev->pinned);
 else
 list_add_tail(&res->lru, &man->lru[bo->priority]);
-man->usage += res->num_pages << PAGE_SHIFT;
+man->usage += res->size;
 spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);

@@ -214,7 +214,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,

 spin_lock(&bdev->lru_lock);
 list_del_init(&res->lru);
-man->usage -= res->num_pages << PAGE_SHIFT;
+man->usage -= res->size;
 spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_fini);

@@ -665,17 +665,15 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
 iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
 iter_io->needs_unmap = false;
 } else {
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
 iter_io->needs_unmap = true;
 memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
 if (mem->bus.caching == ttm_write_combined)
 iosys_map_set_vaddr_iomem(&iter_io->dmap,
 ioremap_wc(mem->bus.offset,
-bus_size));
+mem->size));
 else if (mem->bus.caching == ttm_cached)
 iosys_map_set_vaddr(&iter_io->dmap,
-memremap(mem->bus.offset, bus_size,
+memremap(mem->bus.offset, mem->size,
 MEMREMAP_WB |
 MEMREMAP_WT |
 MEMREMAP_WC));

@@ -684,7 +682,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
 if (iosys_map_is_null(&iter_io->dmap))
 iosys_map_set_vaddr_iomem(&iter_io->dmap,
 ioremap(mem->bus.offset,
-bus_size));
+mem->size));

 if (iosys_map_is_null(&iter_io->dmap)) {
 ret = -ENOMEM;
@@ -326,6 +326,8 @@ struct vc4_hvs {

 struct clk *core_clk;

+unsigned long max_core_rate;
+
 /* Memory manager for CRTCs to allocate space in the display
 * list. Units are dwords.
 */

@@ -337,6 +339,20 @@ struct vc4_hvs {
 struct drm_mm_node mitchell_netravali_filter;

 struct debugfs_regset32 regset;
+
+/*
+ * Even if HDMI0 on the RPi4 can output modes requiring a pixel
+ * rate higher than 297MHz, it needs some adjustments in the
+ * config.txt file to be able to do so and thus won't always be
+ * available.
+ */
+bool vc5_hdmi_enable_hdmi_20;
+
+/*
+ * 4096x2160@60 requires a core overclock to work, so register
+ * whether that is sufficient.
+ */
+bool vc5_hdmi_enable_4096by2160;
 };

 struct vc4_plane {

@@ -459,6 +459,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
 static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
 {
 struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
 int ret = 0;
 struct edid *edid;

@@ -482,7 +483,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
 ret = drm_add_edid_modes(connector, edid);
 kfree(edid);

-if (vc4_hdmi->disable_4kp60) {
+if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
 struct drm_device *drm = connector->dev;
 const struct drm_display_mode *mode;

@@ -1752,15 +1753,23 @@ vc4_hdmi_sink_supports_format_bpc(const struct vc4_hdmi *vc4_hdmi,

 static enum drm_mode_status
 vc4_hdmi_encoder_clock_valid(const struct vc4_hdmi *vc4_hdmi,
+const struct drm_display_mode *mode,
 unsigned long long clock)
 {
 const struct drm_connector *connector = &vc4_hdmi->connector;
 const struct drm_display_info *info = &connector->display_info;
+struct vc4_dev *vc4 = to_vc4_dev(connector->dev);

 if (clock > vc4_hdmi->variant->max_pixel_clock)
 return MODE_CLOCK_HIGH;

-if (vc4_hdmi->disable_4kp60 && clock > HDMI_14_MAX_TMDS_CLK)
+if (!vc4->hvs->vc5_hdmi_enable_hdmi_20 && clock > HDMI_14_MAX_TMDS_CLK)
 return MODE_CLOCK_HIGH;

+/* 4096x2160@60 is not reliable without overclocking core */
+if (!vc4->hvs->vc5_hdmi_enable_4096by2160 &&
+mode->hdisplay > 3840 && mode->vdisplay >= 2160 &&
+drm_mode_vrefresh(mode) >= 50)
+return MODE_CLOCK_HIGH;
+
 if (info->max_tmds_clock && clock > (info->max_tmds_clock * 1000))

@@ -1797,7 +1806,7 @@ vc4_hdmi_encoder_compute_clock(const struct vc4_hdmi *vc4_hdmi,
 unsigned long long clock;

 clock = vc4_hdmi_encoder_compute_mode_clock(mode, bpc, fmt);
-if (vc4_hdmi_encoder_clock_valid(vc4_hdmi, clock) != MODE_OK)
+if (vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, clock) != MODE_OK)
 return -EINVAL;

 vc4_state->tmds_char_rate = clock;

@@ -1960,7 +1969,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 (mode->hsync_end % 2) || (mode->htotal % 2)))
 return MODE_H_ILLEGAL;

-return vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode->clock * 1000);
+return vc4_hdmi_encoder_clock_valid(vc4_hdmi, mode, mode->clock * 1000);
 }

 static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {

@@ -3456,14 +3465,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 vc4_hdmi->disable_wifi_frequencies =
 of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");

-if (variant->max_pixel_clock == 600000000) {
-struct vc4_dev *vc4 = to_vc4_dev(drm);
-long max_rate = clk_round_rate(vc4->hvs->core_clk, 550000000);
-
-if (max_rate < 550000000)
-vc4_hdmi->disable_4kp60 = true;
-}
-
 ret = devm_pm_runtime_enable(dev);
 if (ret)
 return ret;

@@ -156,14 +156,6 @@ struct vc4_hdmi {
 */
 bool disable_wifi_frequencies;

-/*
- * Even if HDMI0 on the RPi4 can output modes requiring a pixel
- * rate higher than 297MHz, it needs some adjustments in the
- * config.txt file to be able to do so and thus won't always be
- * available.
- */
-bool disable_4kp60;
-
 struct cec_adapter *cec_adap;
 struct cec_msg cec_rx_msg;
 bool cec_tx_ok;
@@ -28,6 +28,8 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_vblank.h>

+#include <soc/bcm2835/raspberrypi-firmware.h>
+
 #include "vc4_drv.h"
 #include "vc4_regs.h"

@@ -791,12 +793,36 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

 if (vc4->is_vc5) {
+struct rpi_firmware *firmware;
+struct device_node *node;
+unsigned int max_rate;
+
+node = rpi_firmware_find_node();
+if (!node)
+return -EINVAL;
+
+firmware = rpi_firmware_get(node);
+of_node_put(node);
+if (!firmware)
+return -EPROBE_DEFER;
+
 hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
 if (IS_ERR(hvs->core_clk)) {
 dev_err(&pdev->dev, "Couldn't get core clock\n");
 return PTR_ERR(hvs->core_clk);
 }

+max_rate = rpi_firmware_clk_get_max_rate(firmware,
+RPI_FIRMWARE_CORE_CLK_ID);
+rpi_firmware_put(firmware);
+if (max_rate >= 550000000)
+hvs->vc5_hdmi_enable_hdmi_20 = true;
+
+if (max_rate >= 600000000)
+hvs->vc5_hdmi_enable_4096by2160 = true;
+
+hvs->max_core_rate = max_rate;
+
 ret = clk_prepare_enable(hvs->core_clk);
 if (ret) {
 dev_err(&pdev->dev, "Couldn't enable the core clock\n");

@@ -396,8 +396,8 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 if (vc4->is_vc5) {
 unsigned long state_rate = max(old_hvs_state->core_clock_rate,
 new_hvs_state->core_clock_rate);
-unsigned long core_rate = max_t(unsigned long,
-500000000, state_rate);
+unsigned long core_rate = clamp_t(unsigned long, state_rate,
+500000000, hvs->max_core_rate);

 drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

@@ -431,14 +431,17 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
 drm_atomic_helper_cleanup_planes(dev, state);

 if (vc4->is_vc5) {
-drm_dbg(dev, "Running the core clock at %lu Hz\n",
-new_hvs_state->core_clock_rate);
+unsigned long core_rate = min_t(unsigned long,
+hvs->max_core_rate,
+new_hvs_state->core_clock_rate);
+
+drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);

 /*
 * Request a clock rate based on the current HVS
 * requirements.
 */
-WARN_ON(clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate));
+WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));

 drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
 clk_get_rate(hvs->core_clk));
@@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 d.src_addr = NULL;
 d.dst_pages = dst->ttm->pages;
 d.src_pages = src->ttm->pages;
-d.dst_num_pages = dst->resource->num_pages;
-d.src_num_pages = src->resource->num_pages;
+d.dst_num_pages = PFN_UP(dst->resource->size);
+d.src_num_pages = PFN_UP(src->resource->size);
 d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
 d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
 d.diff = diff;

@@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 int ret = 0;

 place = vmw_vram_placement.placement[0];
-place.lpfn = bo->resource->num_pages;
+place.lpfn = PFN_UP(bo->resource->size);
 placement.num_placement = 1;
 placement.placement = &place;
 placement.num_busy_placement = 1;

@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 * that situation.
 */
 if (bo->resource->mem_type == TTM_PL_VRAM &&
-bo->resource->start < bo->resource->num_pages &&
+bo->resource->start < PFN_UP(bo->resource->size) &&
 bo->resource->start > 0 &&
 buf->base.pin_count == 0) {
 ctx.interruptible = false;

@@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 if (virtual)
 return virtual;

-ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
+ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
 if (ret)
 DRM_ERROR("Buffer object map failed: %d.\n", ret);

@@ -443,7 +443,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 * This should really be a TTM utility.
 */
-for (i = 0; i < old_bo->resource->num_pages; ++i) {
+for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
 bool dummy;

 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);

@@ -98,6 +98,10 @@
 #define VMW_RES_SHADER ttm_driver_type4
 #define VMW_RES_HT_ORDER 12

+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
 #define MKSSTAT_CAPACITY_LOG2 5U
 #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,

 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

-if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
 VMW_DEBUG_USER("Query buffer too large.\n");
 return -EINVAL;
 }

@@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 spin_lock(&gman->lock);

 if (gman->max_gmr_pages > 0) {
-gman->used_gmr_pages += (*res)->num_pages;
+gman->used_gmr_pages += PFN_UP((*res)->size);
 /*
 * Because the graphics memory is a soft limit we can try to
 * expand it instead of letting the userspace apps crash.

@@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 return 0;

 nospace:
-gman->used_gmr_pages -= (*res)->num_pages;
+gman->used_gmr_pages -= PFN_UP((*res)->size);
 spin_unlock(&gman->lock);
 ida_free(&gman->gmr_ida, id);
 ttm_resource_fini(man, *res);

@@ -129,7 +129,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,

 ida_free(&gman->gmr_ida, res->start);
 spin_lock(&gman->lock);
-gman->used_gmr_pages -= res->num_pages;
+gman->used_gmr_pages -= PFN_UP(res->size);
 spin_unlock(&gman->lock);
 ttm_resource_fini(man, res);
 kfree(res);
@@ -25,6 +25,9 @@
 *
 **************************************************************************/

+#include "vmwgfx_kms.h"
+#include "vmw_surface_cache.h"
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_damage_helper.h>

@@ -32,8 +35,6 @@
 #include <drm/drm_rect.h>
 #include <drm/drm_sysfs.h>

-#include "vmwgfx_kms.h"
-
 void vmw_du_cleanup(struct vmw_display_unit *du)
 {
 struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

@@ -351,7 +352,6 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 spin_unlock(&dev_priv->cursor_lock);
 }

-
 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 struct ttm_object_file *tfile,
 struct ttm_buffer_object *bo,

@@ -369,6 +369,9 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 SVGA3dCmdSurfaceDMA dma;
 } *cmd;
 int i, ret;
+const struct SVGA3dSurfaceDesc *desc =
+vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

 cmd = container_of(header, struct vmw_dma_cmd, header);

@@ -393,7 +396,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 box->x != 0 || box->y != 0 || box->z != 0 ||
 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
-box->d != 1 || box_count != 1) {
+box->d != 1 || box_count != 1 ||
+box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
 /* TODO handle none page aligned offsets */
 /* TODO handle more dst & src != 0 */
 /* TODO handle more then one copy */

@@ -407,7 +411,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 }

 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
-kmap_num = (64*64*4) >> PAGE_SHIFT;
+kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;

 ret = ttm_bo_reserve(bo, true, false, NULL);
 if (unlikely(ret != 0)) {

@@ -421,14 +425,15 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,

 virtual = ttm_kmap_obj_virtual(&map, &dummy);

-if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
-memcpy(srf->snooper.image, virtual, 64*64*4);
+if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+memcpy(srf->snooper.image, virtual,
+VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 } else {
 /* Image is unsigned pointer. */
 for (i = 0; i < box->h; i++)
-memcpy(srf->snooper.image + i * 64,
+memcpy(srf->snooper.image + i * image_pitch,
 virtual + i * cmd->dma.guest.pitch,
-box->w * 4);
+box->w * desc->pitchBytesPerBlock);
 }

 srf->snooper.age++;

@@ -479,7 +484,8 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 du->cursor_age = du->cursor_surface->snooper.age;
 vmw_send_define_cursor_cmd(dev_priv,
 du->cursor_surface->snooper.image,
-64, 64,
+VMW_CURSOR_SNOOP_WIDTH,
+VMW_CURSOR_SNOOP_HEIGHT,
 du->hotspot_x + du->core_hotspot_x,
 du->hotspot_y + du->core_hotspot_y);
 }

@@ -1805,7 +1811,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 if (IS_ERR(vfb)) {
 ret = PTR_ERR(vfb);
 goto err_out;
 }
 }

 err_out:
 /* vmw_user_lookup_handle takes one ref so does new_fb */

@@ -2325,7 +2331,7 @@ retry:
 if (ret == -EDEADLK) {
 drm_modeset_backoff(&ctx);
 goto retry;
 }
 }
 goto out_fini;
 }
 }
@@ -230,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 struct vmw_bo_dirty *dirty = vbo->dirty;
-pgoff_t num_pages = vbo->base.resource->num_pages;
+pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
 size_t size;
 int ret;

@@ -395,7 +395,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
 return ret;

 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-if (unlikely(page_offset >= bo->resource->num_pages)) {
+if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
 ret = VM_FAULT_SIGBUS;
 goto out_unlock;
 }

@@ -438,7 +438,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)

 page_offset = vmf->pgoff -
 drm_vma_node_start(&bo->base.vma_node);
-if (page_offset >= bo->resource->num_pages ||
+if (page_offset >= PFN_UP(bo->resource->size) ||
 vmw_resources_clean(vbo, page_offset,
 page_offset + PAGE_SIZE,
 &allowed_prefault)) {
@@ -815,11 +815,15 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 res->backup_size = cur_bo_offset;
 if (metadata->scanout &&
 metadata->num_sizes == 1 &&
-metadata->sizes[0].width == 64 &&
-metadata->sizes[0].height == 64 &&
-metadata->format == SVGA3D_A8R8G8B8) {
-
-srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+const struct SVGA3dSurfaceDesc *desc =
+vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+VMW_CURSOR_SNOOP_HEIGHT *
+desc->pitchBytesPerBlock;
+srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
 if (!srf->snooper.image) {
 DRM_ERROR("Failed to allocate cursor_image\n");
 ret = -ENOMEM;
@@ -197,7 +197,7 @@ struct ttm_bus_placement {
 * struct ttm_resource
 *
 * @start: Start of the allocation.
- * @num_pages: Actual size of resource in pages.
+ * @size: Actual size of resource in bytes.
 * @mem_type: Resource type of the allocation.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU

@@ -208,7 +208,7 @@ struct ttm_bus_placement {
 */
 struct ttm_resource {
 unsigned long start;
-unsigned long num_pages;
+size_t size;
 uint32_t mem_type;
 uint32_t placement;
 struct ttm_bus_placement bus;
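The two hunks above are the core of the "ttm: Convert ttm_resource to use size_t for its size" change: struct ttm_resource loses its num_pages field in favour of a byte-sized size member, and drivers that still need a page count derive it with PFN_UP(), as the driver hunks throughout this series do. A minimal sketch of the pattern (the helper name here is only illustrative):

#include <linux/pfn.h>			/* PFN_UP() */
#include <drm/ttm/ttm_resource.h>

/* ttm_resource::size is now authoritative in bytes; derive pages on demand. */
static inline unsigned long example_resource_num_pages(const struct ttm_resource *res)
{
	return PFN_UP(res->size);
}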
@@ -88,7 +88,7 @@ struct ttm_tt {
 #define TTM_TT_FLAG_EXTERNAL (1 << 2)
 #define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)

-#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
+#define TTM_TT_FLAG_PRIV_POPULATED (1U << 31)
 uint32_t page_flags;
 /** @num_pages: Number of pages in the page array. */
 uint32_t num_pages;
@@ -136,12 +136,52 @@ enum rpi_firmware_property_tag {
 RPI_FIRMWARE_GET_DMA_CHANNELS = 0x00060001,
 };

+enum rpi_firmware_clk_id {
+RPI_FIRMWARE_EMMC_CLK_ID = 1,
+RPI_FIRMWARE_UART_CLK_ID,
+RPI_FIRMWARE_ARM_CLK_ID,
+RPI_FIRMWARE_CORE_CLK_ID,
+RPI_FIRMWARE_V3D_CLK_ID,
+RPI_FIRMWARE_H264_CLK_ID,
+RPI_FIRMWARE_ISP_CLK_ID,
+RPI_FIRMWARE_SDRAM_CLK_ID,
+RPI_FIRMWARE_PIXEL_CLK_ID,
+RPI_FIRMWARE_PWM_CLK_ID,
+RPI_FIRMWARE_HEVC_CLK_ID,
+RPI_FIRMWARE_EMMC2_CLK_ID,
+RPI_FIRMWARE_M2MC_CLK_ID,
+RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+RPI_FIRMWARE_VEC_CLK_ID,
+RPI_FIRMWARE_NUM_CLK_ID,
+};
+
+/**
+ * struct rpi_firmware_clk_rate_request - Firmware Request for a rate
+ * @id: ID of the clock being queried
+ * @rate: Rate in Hertz. Set by the firmware.
+ *
+ * Used by @RPI_FIRMWARE_GET_CLOCK_RATE, @RPI_FIRMWARE_GET_CLOCK_MEASURED,
+ * @RPI_FIRMWARE_GET_MAX_CLOCK_RATE and @RPI_FIRMWARE_GET_MIN_CLOCK_RATE.
+ */
+struct rpi_firmware_clk_rate_request {
+__le32 id;
+__le32 rate;
+} __packed;
+
+#define RPI_FIRMWARE_CLK_RATE_REQUEST(_id) \
+{ \
+.id = _id, \
+}
+
 #if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
 int rpi_firmware_property(struct rpi_firmware *fw,
 u32 tag, void *data, size_t len);
 int rpi_firmware_property_list(struct rpi_firmware *fw,
 void *data, size_t tag_size);
 void rpi_firmware_put(struct rpi_firmware *fw);
+unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw,
+unsigned int id);
+struct device_node *rpi_firmware_find_node(void);
 struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
 struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
 struct device_node *firmware_node);

@@ -159,6 +199,18 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
 }

 static inline void rpi_firmware_put(struct rpi_firmware *fw) { }

+static inline unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw,
+unsigned int id)
+{
+return UINT_MAX;
+}
+
+static inline struct device_node *rpi_firmware_find_node(void)
+{
+return NULL;
+}
+
 static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
 {
 return NULL;