drm-misc-fixes for v6.1-rc2:

- Fix a buffer overflow in format_helper_test.
- Set DDC pointer in drmm_connector_init.
- Compiler fixes for panfrost.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEuXvWqAysSYEJGuVH/lWMcqZwE8MFAmNRMiEACgkQ/lWMcqZw
 E8MJExAAhVv/bEcg9Ib1e2gIirtOKB3w0V20rEvkvplzsfKALMH3oXHx0cIbk2sj
 YBICPiuc2x4sDQW9xlQmPa5Gv+6aoqjSOpu+Zpc5MQAb8qnO/vxDCOlYDJwcicjP
 DdxXwJcJ48+x3FFWmrg6gcU8fmH6Rckb2BgBsF3fyzOhIMF2ME6a02S+bYuHNOxK
 sQfo1Tlbibi5pfrxHFBE9V+Wjf3ohQwxQHmcpAC6wwgtGKOLQSxkU7o+wCFUNsn1
 dlWTVe9+2xgNQenTue091PAkFP5R4T3wv654EPudyUYjybQL8ci8aFjaFTQgR3BT
 s4LsViTViTbm5OrSga2hGA+GGH2OGkW70yN+tqXPVxWyPaMA9GuoTh9xKhY3zVsB
 69HlWBzfhAoyixp0rWmilZIGAvKXn8xf+HzOxQ/ihDk6a/VsyLbEdYP72AQTRCi8
 A+h6vBNxP6AVPwDCA//hpCupG75bI8h/Plf1R/W7uzVTKXCSPAGdzROI8dxjsFAX
 Y4OR9kd8yn+bpf6G6D0q+tJV8BqUAzs5AUVkXWU5i1XaAK6fNpqh066yFQt8xCHX
 hFRcr7StYqI9ZliGP1ZEjE8nsiaqPZfDLMqSQlHl392JEfmf5uwskKwVadGHVfuC
 L+iATevGiNsY4JHHk+YBXHMSWrX9XIRNDio+LZQgFttr/KTK4ac=
 =rzib
 -----END PGP SIGNATURE-----

Merge tag 'drm-misc-fixes-2022-10-20' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

drm-misc-fixes for v6.1-rc2:
- Fix a buffer overflow in format_helper_test.
- Set DDC pointer in drmm_connector_init.
- Compiler fixes for panfrost.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c4d05683-8ebe-93b8-d24c-d1d2c68f12c4@linux.intel.com
Dave Airlie 2022-10-21 09:56:07 +10:00
commit cbc543c59e
7 changed files with 59 additions and 38 deletions


@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
                                    DMA_RESV_USAGE_BOOKKEEP);
         }
-        if (fence && !p->immediate)
+        if (fence && !p->immediate) {
+                /*
+                 * Most hw generations now have a separate queue for page table
+                 * updates, but when the queue is shared with userspace we need
+                 * the extra CPU round trip to correctly flush the TLB.
+                 */
+                set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
                 swap(*fence, f);
+        }
         dma_fence_put(f);
         return 0;


@@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev,
         if (drm_WARN_ON(dev, funcs && funcs->destroy))
                 return -EINVAL;
-        ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+        ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
         if (ret)
                 return ret;
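
For context, a minimal sketch of a caller (hypothetical driver code, not part of this series; struct my_device, my_connector_funcs and the connector type are made up for illustration). With the fix, the i2c adapter passed as the last argument is forwarded to __drm_connector_init() and ends up in connector->ddc instead of being silently dropped:

    #include <drm/drm_connector.h>

    /* Hypothetical probe-time helper, shown only to illustrate the fixed path. */
    static int my_hdmi_connector_create(struct my_device *priv)
    {
            /*
             * Managed init: the connector is torn down with the drm_device.
             * Passing priv->ddc here now actually sets connector->ddc, so the
             * "ddc" sysfs link can be created when the connector registers.
             */
            return drmm_connector_init(&priv->drm, &priv->connector,
                                       &my_connector_funcs,
                                       DRM_MODE_CONNECTOR_HDMIA,
                                       priv->ddc);
    }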


@@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
 {
         struct panfrost_dump_object_header *hdr = iter->hdr;
-        hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
-        hdr->type = cpu_to_le32(type);
-        hdr->file_offset = cpu_to_le32(iter->data - iter->start);
-        hdr->file_size = cpu_to_le32(data_end - iter->data);
+        hdr->magic = PANFROSTDUMP_MAGIC;
+        hdr->type = type;
+        hdr->file_offset = iter->data - iter->start;
+        hdr->file_size = data_end - iter->data;
         iter->hdr++;
-        iter->data += le32_to_cpu(hdr->file_size);
+        iter->data += hdr->file_size;
 }
 static void
@@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
                 reg = panfrost_dump_registers[i] + js_as_offset;
-                dumpreg->reg = cpu_to_le32(reg);
-                dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+                dumpreg->reg = reg;
+                dumpreg->value = gpu_read(pfdev, reg);
         }
         panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
@@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job)
         struct panfrost_dump_iterator iter;
         struct drm_gem_object *dbo;
         unsigned int n_obj, n_bomap_pages;
-        __le64 *bomap, *bomap_start;
+        u64 *bomap, *bomap_start;
         size_t file_size;
         u32 as_nr;
         int slot;
@@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job)
          * For now, we write the job identifier in the register dump header,
          * so that we can decode the entire dump later with pandecode
          */
-        iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
-        iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
-        iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
-        iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
-        iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+        iter.hdr->reghdr.jc = job->jc;
+        iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
+        iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
+        iter.hdr->reghdr.gpu_id = pfdev->features.id;
+        iter.hdr->reghdr.nbos = job->bo_count;
         panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
@@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job)
                 WARN_ON(!mapping->active);
-                iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+                iter.hdr->bomap.data[0] = bomap - bomap_start;
                 for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
                         struct page *page = sg_page_iter_page(&page_iter);
                         if (!IS_ERR(page)) {
-                                *bomap++ = cpu_to_le64(page_to_phys(page));
+                                *bomap++ = page_to_phys(page);
                         } else {
                                 dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
-                                *bomap++ = ~cpu_to_le64(0);
+                                *bomap++ = 0;
                         }
                 }
-                iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+                iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
                 vaddr = map.vaddr;
                 memcpy(iter.data, vaddr, bo->base.base.size);
                 drm_gem_shmem_vunmap(&bo->base, &map);
-                iter.hdr->bomap.valid = cpu_to_le32(1);
+                iter.hdr->bomap.valid = 1;
 dump_header:        panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
                                 bo->base.base.size);


@@ -385,7 +385,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
         }
         s_fence = to_drm_sched_fence(fence);
-        if (s_fence && s_fence->sched == sched) {
+        if (s_fence && s_fence->sched == sched &&
+            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
                 /*
                  * Fence is from the same scheduler, only need to wait for


@@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
         iosys_map_set_vaddr(&src, xrgb8888);
         drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
-        buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE);
+        buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
         KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
 }
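
The overflow is in the conversion step rather than in the format helper itself: le32buf_to_cpu() in this test file converts however many 32-bit words it is told to, while the destination buffer only holds dst_size bytes, so passing TEST_BUF_SIZE walked past the end of the allocation. Roughly, and only as an approximation for illustration (not the exact helper), it has this shape:

    static u32 *le32buf_to_cpu(struct kunit *test, const __le32 *buf, size_t buf_size)
    {
            u32 *dst;
            size_t n;

            /* buf_size is the only bound on the loop below, so it must match
             * the number of words actually backing 'buf'. */
            dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
            if (!dst)
                    return NULL;

            for (n = 0; n < buf_size; n++)
                    dst[n] = le32_to_cpu(buf[n]);

            return dst;
    }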


@@ -32,6 +32,15 @@
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words, we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE        DMA_FENCE_FLAG_USER_BITS
 struct drm_gem_object;
 struct drm_gpu_scheduler;
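
This definition is what ties the amdgpu and drm_sched_entity_add_dependency_cb() hunks above together. As a minimal sketch of the driver-side pattern (a hypothetical helper, not part of this series; amdgpu_vm_sdma_commit() above is the real user), the flag goes on the scheduler's finished fence, which is what dependent jobs wait on:

    #include <drm/gpu_scheduler.h>

    /*
     * Hypothetical helper: mark a job so that anything depending on it takes
     * the CPU path. drm_sched_entity_add_dependency_cb() then refuses the
     * pipelined same-scheduler fast path for those dependencies.
     */
    static void my_job_disable_pipelining(struct drm_sched_job *job)
    {
            set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->s_fence->finished.flags);
    }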


@@ -235,25 +235,29 @@ struct drm_panfrost_madvise {
 #define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
 #define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
+/*
+ * This structure is the native endianness of the dumping machine, tools can
+ * detect the endianness by looking at the value in 'magic'.
+ */
 struct panfrost_dump_object_header {
-        __le32 magic;
-        __le32 type;
-        __le32 file_size;
-        __le32 file_offset;
+        __u32 magic;
+        __u32 type;
+        __u32 file_size;
+        __u32 file_offset;
         union {
-                struct pan_reg_hdr {
-                        __le64 jc;
-                        __le32 gpu_id;
-                        __le32 major;
-                        __le32 minor;
-                        __le64 nbos;
+                struct {
+                        __u64 jc;
+                        __u32 gpu_id;
+                        __u32 major;
+                        __u32 minor;
+                        __u64 nbos;
                 } reghdr;
                 struct pan_bomap_hdr {
-                        __le32 valid;
-                        __le64 iova;
-                        __le32 data[2];
+                        __u32 valid;
+                        __u64 iova;
+                        __u32 data[2];
                 } bomap;
                 /*
@@ -261,14 +265,14 @@ struct panfrost_dump_object_header
                  * with new fields and also keep it 512-byte aligned
                  */
-                __le32 sizer[496];
+                __u32 sizer[496];
         };
 };
 /* Registers object, an array of these */
 struct panfrost_dump_registers {
-        __le32 reg;
-        __le32 value;
+        __u32 reg;
+        __u32 value;
 };
 #if defined(__cplusplus)
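
The comment added to panfrost_dump_object_header moves any byte swapping to the tool that reads the dump. A sketch of the detection it describes, on the userspace side (hypothetical code; it assumes PANFROSTDUMP_MAGIC is available from an installed uapi or libdrm copy of panfrost_drm.h):

    #include <byteswap.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <drm/panfrost_drm.h>   /* PANFROSTDUMP_MAGIC, header layout */

    /*
     * The header is written in the dump machine's native endianness, so a
     * byte-swapped magic tells the reader that every header field (and the
     * register/bomap payloads) must be swapped before use.
     */
    static bool panfrost_dump_needs_bswap(uint32_t magic)
    {
            /* Native match: read fields as-is. */
            if (magic == PANFROSTDUMP_MAGIC)
                    return false;
            /* Swapped match: dump came from the opposite endianness. Callers
             * should treat any other value as a corrupt dump. */
            return bswap_32(magic) == PANFROSTDUMP_MAGIC;
    }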