// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr_out: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns: CRC value computed using crc32 on the visible portion of
 * the final framebuffer at vaddr_out
 */
static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
{
	int i, j, src_offset;
	int x_src = composer->src.x1 >> 16;
	int y_src = composer->src.y1 >> 16;
	int h_src = drm_rect_height(&composer->src) >> 16;
	int w_src = drm_rect_width(&composer->src) >> 16;
	u32 crc = 0;
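
	/*
	 * The src rectangle is in 16.16 fixed point, hence the >> 16
	 * conversions above. Each visible pixel lives at
	 * offset + i * pitch + j * cpp bytes into the mapping; e.g. with
	 * pitch = 4096 and cpp = 4, pixel (j = 10, i = 2) sits at byte
	 * 2 * 4096 + 10 * 4 = 8232 (illustrative values only).
	 */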

	for (i = y_src; i < y_src + h_src; ++i) {
		for (j = x_src; j < x_src + w_src; ++j) {
			src_offset = composer->offset
				     + (i * composer->pitch)
				     + (j * composer->cpp);
			/* XRGB format ignores Alpha channel */
			bitmap_clear(vaddr_out + src_offset, 24, 8);
			crc = crc32_le(crc, vaddr_out + src_offset,
				       sizeof(u32));
		}
	}

	return crc;
}

/**
 * blend - blend value at vaddr_src with value at vaddr_dst
 * @vaddr_dst: destination address
 * @vaddr_src: source address
 * @dest_composer: destination framebuffer's metadata
 * @src_composer: source framebuffer's metadata
 *
 * Blend the value at vaddr_src with the value at vaddr_dst.
 * Currently, this function writes the value at vaddr_src over the value
 * at vaddr_dst, using the buffers' metadata to locate the new values
 * in vaddr_src and their destination in vaddr_dst.
 *
 * TODO: Use the alpha value to blend vaddr_src with vaddr_dst
 *	 instead of overwriting it.
 */
static void blend(void *vaddr_dst, void *vaddr_src,
		  struct vkms_composer *dest_composer,
		  struct vkms_composer *src_composer)
{
	int i, j, j_dst, i_dst;
	int offset_src, offset_dst;

	int x_src = src_composer->src.x1 >> 16;
	int y_src = src_composer->src.y1 >> 16;

	int x_dst = src_composer->dst.x1;
	int y_dst = src_composer->dst.y1;
	int h_dst = drm_rect_height(&src_composer->dst);
	int w_dst = drm_rect_width(&src_composer->dst);

	int y_limit = y_src + h_dst;
	int x_limit = x_src + w_dst;
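
	/*
	 * Copy the source rect pixel by pixel onto the destination: (i, j)
	 * walks the source in its own coordinate space while (i_dst, j_dst)
	 * tracks the matching destination position. Only the src rect is in
	 * 16.16 fixed point; dst is in whole pixels, so only src needs the
	 * >> 16 conversion above.
	 */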

	for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
		for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
			offset_dst = dest_composer->offset
				     + (i_dst * dest_composer->pitch)
				     + (j_dst++ * dest_composer->cpp);
			offset_src = src_composer->offset
				     + (i * src_composer->pitch)
				     + (j * src_composer->cpp);

			memcpy(vaddr_dst + offset_dst,
			       vaddr_src + offset_src, sizeof(u32));
		}
		i_dst++;
	}
}

static void compose_cursor(struct vkms_composer *cursor_composer,
			   struct vkms_composer *primary_composer,
			   void *vaddr_out)
{
	struct drm_gem_object *cursor_obj;
	struct vkms_gem_object *cursor_vkms_obj;

	cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
	cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);

	if (WARN_ON(!cursor_vkms_obj->vaddr))
		return;
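
	/*
	 * The cursor's vaddr is mapped in prepare_fb, and pending composer
	 * work is flushed before framebuffers are unmapped, so a missing
	 * mapping here is unexpected.
	 */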

	blend(vaddr_out, cursor_vkms_obj->vaddr,
	      primary_composer, cursor_composer);
}

static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
			      struct vkms_composer *cursor_composer)
{
	struct drm_framebuffer *fb = &primary_composer->fb;
	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
	void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
	u32 crc = 0;

	if (!vaddr_out) {
		DRM_ERROR("Failed to allocate memory for output frame.");
		return 0;
	}

	if (WARN_ON(!vkms_obj->vaddr)) {
		kfree(vaddr_out);
		return crc;
	}
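
	/*
	 * Compose into a scratch copy of the primary framebuffer so the
	 * framebuffer itself is left untouched.
	 */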

	memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);

	if (cursor_composer)
		compose_cursor(cursor_composer, primary_composer, vaddr_out);

	crc = compute_crc(vaddr_out, primary_composer);

	kfree(vaddr_out);

	return crc;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. The work_struct is
 * scheduled in an ordered workqueue that is periodically run by
 * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	struct vkms_composer *primary_composer = NULL;
	struct vkms_composer *cursor_composer = NULL;
	u32 crc32 = 0;
	u64 frame_start, frame_end;
	bool crc_pending;
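
	/*
	 * Grab and clear frame_start/end and crc_pending atomically under
	 * composer_lock: if we race with the vblank hrtimer here, the
	 * hrtimer simply queues the work again.
	 */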

	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;
	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do: the hrtimer can requeue us after updating
	 * frame_end while an earlier run has already consumed the whole
	 * range, in which case crc_pending is false here.
	 */
	if (!crc_pending)
		return;
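
	/*
	 * active_planes holds the visible planes in z-order: index 0 is the
	 * primary plane and index 1, when present, the cursor. The pointers
	 * remain valid because pending composer work is flushed before the
	 * next atomic commit unmaps the framebuffers.
	 */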

	if (crtc_state->num_active_planes >= 1)
		primary_composer = crtc_state->active_planes[0]->composer;

	if (crtc_state->num_active_planes == 2)
		cursor_composer = crtc_state->active_planes[1]->composer;

	if (primary_composer)
		crc32 = _vkms_get_crc(primary_composer, cursor_composer);

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}
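
/*
 * "auto" is the only CRC source vkms exposes; a NULL src_name from
 * userspace means CRC generation is being disabled.
 */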

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret = 0;

	ret = vkms_crc_parse_source(src_name, &enabled);

	spin_lock_irq(&out->lock);
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	return ret;
}