// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr_out: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns: the CRC value computed using crc32 on the visible portion of
 * the final framebuffer at @vaddr_out
 */
static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
{
        int i, j, src_offset;
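        /* Plane source coordinates are in 16.16 fixed point, hence the >> 16. */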
        int x_src = composer->src.x1 >> 16;
        int y_src = composer->src.y1 >> 16;
        int h_src = drm_rect_height(&composer->src) >> 16;
        int w_src = drm_rect_width(&composer->src) >> 16;
        u32 crc = 0;

        for (i = y_src; i < y_src + h_src; ++i) {
                for (j = x_src; j < x_src + w_src; ++j) {
                        src_offset = composer->offset
                                     + (i * composer->pitch)
                                     + (j * composer->cpp);
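                        /* Feed each visible 32-bit pixel into the running CRC. */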
                        crc = crc32_le(crc, vaddr_out + src_offset,
                                       sizeof(u32));
                }
        }

        return crc;
}

static u8 blend_channel(u8 src, u8 dst, u8 alpha)
{
        u32 pre_blend;
        u8 new_color;
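
        /* src is pre-multiplied, so only dst is scaled by the remaining alpha. */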
        pre_blend = (src * 255 + dst * (255 - alpha));

        /* Faster div by 255 */
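        /* (x + ((x + 257) >> 8)) >> 8 equals x / 255 for all x in [0, 65535]. */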
        new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);

        return new_color;
}

static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
{
        u8 alpha;

        alpha = argb_src[3];
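        /* ARGB8888 is little-endian: bytes 0-2 are B, G, R and byte 3 is alpha. */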
        argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
        argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
        argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
        /* Opaque primary */
        argb_dst[3] = 0xFF;
}

/**
 * blend - blend value at vaddr_src with value at vaddr_dst
 * @vaddr_dst: destination address
 * @vaddr_src: source address
 * @dst_composer: destination framebuffer's metadata
 * @src_composer: source framebuffer's metadata
 *
 * Blend the vaddr_src value with the vaddr_dst value using the pre-multiplied
 * alpha blending equation, since DRM currently assumes that the pixel color
 * values have already been pre-multiplied with the alpha channel values. See
 * drm_plane_create_blend_mode_property() for more information. This function
 * uses the buffers' metadata to locate the new composite values at vaddr_dst.
 */
static void blend(void *vaddr_dst, void *vaddr_src,
                  struct vkms_composer *dst_composer,
                  struct vkms_composer *src_composer)
{
        int i, j, j_dst, i_dst;
        int offset_src, offset_dst;
        u8 *pixel_dst, *pixel_src;

        int x_src = src_composer->src.x1 >> 16;
        int y_src = src_composer->src.y1 >> 16;

        int x_dst = src_composer->dst.x1;
        int y_dst = src_composer->dst.y1;
        int h_dst = drm_rect_height(&src_composer->dst);
        int w_dst = drm_rect_width(&src_composer->dst);

        int y_limit = y_src + h_dst;
        int x_limit = x_src + w_dst;

        for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
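                /* j_dst++ below walks the destination row; i_dst steps once per row. */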
                for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
                        offset_dst = dst_composer->offset
                                     + (i_dst * dst_composer->pitch)
                                     + (j_dst++ * dst_composer->cpp);
                        offset_src = src_composer->offset
                                     + (i * src_composer->pitch)
                                     + (j * src_composer->cpp);

                        pixel_src = (u8 *)(vaddr_src + offset_src);
                        pixel_dst = (u8 *)(vaddr_dst + offset_dst);
                        alpha_blending(pixel_src, pixel_dst);
                }
                i_dst++;
        }
}

static void compose_cursor(struct vkms_composer *cursor_composer,
                           struct vkms_composer *primary_composer,
                           void *vaddr_out)
{
        struct drm_gem_object *cursor_obj;
        struct vkms_gem_object *cursor_vkms_obj;

        cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
        cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);

        if (WARN_ON(!cursor_vkms_obj->vaddr))
                return;

        blend(vaddr_out, cursor_vkms_obj->vaddr,
              primary_composer, cursor_composer);
}

static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
                              struct vkms_composer *cursor_composer)
{
        struct drm_framebuffer *fb = &primary_composer->fb;
        struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
        struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
        void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
        u32 crc = 0;

        if (!vaddr_out) {
                DRM_ERROR("Failed to allocate memory for output frame.");
                return 0;
        }

        if (WARN_ON(!vkms_obj->vaddr)) {
                kfree(vaddr_out);
                return crc;
        }
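
        /* Compose into a snapshot so the live framebuffer is left untouched. */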
        memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
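
        /* Blend the cursor plane, if any, on top of the copied primary plane. */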
        if (cursor_composer)
                compose_cursor(cursor_composer, primary_composer, vaddr_out);

        crc = compute_crc(vaddr_out, primary_composer);

        kfree(vaddr_out);

        return crc;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. The work_struct is scheduled
 * on an ordered workqueue that is periodically run by _vblank_handle() and
 * flushed at vkms_atomic_crtc_destroy_state().
 */
void vkms_composer_worker(struct work_struct *work)
{
        struct vkms_crtc_state *crtc_state = container_of(work,
                                                          struct vkms_crtc_state,
                                                          composer_work);
        struct drm_crtc *crtc = crtc_state->base.crtc;
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        struct vkms_composer *primary_composer = NULL;
        struct vkms_composer *cursor_composer = NULL;
        u32 crc32 = 0;
        u64 frame_start, frame_end;
        bool crc_pending;
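
        /*
         * Grab and clear the pending frame range in one go so we cannot race
         * with the vblank hrtimer updating it.
         */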
        spin_lock_irq(&out->composer_lock);
        frame_start = crtc_state->frame_start;
        frame_end = crtc_state->frame_end;
        crc_pending = crtc_state->crc_pending;
        crtc_state->frame_start = 0;
        crtc_state->frame_end = 0;
        crtc_state->crc_pending = false;
        spin_unlock_irq(&out->composer_lock);

        /*
         * We raced with the vblank hrtimer and a previous work item already
         * computed the crc; nothing to do.
         */
        if (!crc_pending)
                return;
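
        /* active_planes is kept in z-order: the primary plane comes first. */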
        if (crtc_state->num_active_planes >= 1)
                primary_composer = crtc_state->active_planes[0]->composer;

        if (crtc_state->num_active_planes == 2)
                cursor_composer = crtc_state->active_planes[1]->composer;

        if (primary_composer)
                crc32 = _vkms_get_crc(primary_composer, cursor_composer);

        /*
         * The worker can fall behind the vblank hrtimer, so make sure we
         * catch up.
         */
        while (frame_start <= frame_end)
                drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
                                        size_t *count)
{
        *count = ARRAY_SIZE(pipe_crc_sources);
        return pipe_crc_sources;
}

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
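        /* A NULL source disables CRC capture; "auto" is the only valid source. */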
        int ret = 0;

        if (!src_name) {
                *enabled = false;
        } else if (strcmp(src_name, "auto") == 0) {
                *enabled = true;
        } else {
                *enabled = false;
                ret = -EINVAL;
        }

        return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                           size_t *values_cnt)
{
        bool enabled;

        if (vkms_crc_parse_source(src_name, &enabled) < 0) {
                DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
                return -EINVAL;
        }

        *values_cnt = 1;

        return 0;
}
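
/*
 * Enable/disable the composer while holding a vblank reference, so vblank
 * interrupts keep firing for as long as CRC capture is active.
 */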
static void vkms_set_composer(struct vkms_output *out, bool enabled)
{
        bool old_enabled;

        if (enabled)
                drm_crtc_vblank_get(&out->crtc);

        spin_lock_irq(&out->lock);
        old_enabled = out->composer_enabled;
        out->composer_enabled = enabled;
        spin_unlock_irq(&out->lock);
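
        /* Drop the vblank reference taken when the composer was last enabled. */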
        if (old_enabled)
                drm_crtc_vblank_put(&out->crtc);
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        bool enabled = false;
        int ret = 0;

        ret = vkms_crc_parse_source(src_name, &enabled);

        vkms_set_composer(out, enabled);

        return ret;
}