drm: use seqlock for vblank time/count

This patch replaces the roll-your-own seqlock implementation with a
full-blown seqlock. We also remove the timestamp ring buffer in favour
of a single timestamp/count pair protected by a seqlock. In turn this
means the vblank counter can now be incremented freely, without the
need for clamping.

v2:
  - reduce the scope of the seqlock, keeping vblank_time_lock
  - make the seqlock per vblank_crtc, so multiple readers aren't blocked by
    the writer (see the sketch below)
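
For reference, a minimal sketch of the pattern this patch switches to, using the
<linux/seqlock.h> primitives: write_seqlock()/write_sequnlock() around the update,
read_seqbegin()/read_seqretry() in the lockless reader. The names vblank_state,
vblank_store and vblank_read are invented for the example; the actual changes to
store_vblank() and drm_vblank_count_and_time() are in the diff below.

#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/types.h>

/* Illustrative per-crtc state: one count/timestamp pair guarded by a seqlock.
 * seqlock_init(&state->seqlock) must run once during setup. */
struct vblank_state {
        seqlock_t seqlock;      /* protects count and time */
        u32 count;              /* software vblank counter */
        struct timeval time;    /* timestamp of the last increment */
};

/* Write side: callers are already serialized (vblank_time_lock in DRM),
 * so the seqlock only orders the update against lockless readers. */
static void vblank_store(struct vblank_state *state, u32 inc,
                         const struct timeval *t)
{
        write_seqlock(&state->seqlock);
        state->time = *t;
        state->count += inc;    /* any increment is fine, no clamping needed */
        write_sequnlock(&state->seqlock);
}

/* Read side: lockless; retries only if a writer raced with the read. */
static u32 vblank_read(struct vblank_state *state, struct timeval *t)
{
        unsigned int seq;
        u32 count;

        do {
                seq = read_seqbegin(&state->seqlock);
                count = state->count;
                *t = state->time;
        } while (read_seqretry(&state->seqlock, seq));

        return count;
}

Because each drm_vblank_crtc carries its own seqlock, a reader querying one pipe
never retries because of an update on a different pipe, and readers never make the
writer wait.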

Cc: Mario Kleiner <mario.kleiner@tuebingen.mpg.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1462890088-18194-1-git-send-email-matthew.auld@intel.com
Author:  Matthew Auld, 2016-05-10 15:21:28 +01:00 (committed by Daniel Vetter)
Commit:  d4055a9b20 (parent: b80d394232)
2 changed files with 17 additions and 87 deletions


@@ -42,10 +42,6 @@
 #include <linux/vgaarb.h>
 #include <linux/export.h>
-/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, pipe, count) \
-((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
  */
@@ -82,29 +78,15 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
 struct timeval *t_vblank, u32 last)
 {
 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-u32 tslot;
 assert_spin_locked(&dev->vblank_time_lock);
 vblank->last = last;
-/* All writers hold the spinlock, but readers are serialized by
- * the latching of vblank->count below.
- */
-tslot = vblank->count + vblank_count_inc;
-vblanktimestamp(dev, pipe, tslot) = *t_vblank;
-/*
- * vblank timestamp updates are protected on the write side with
- * vblank_time_lock, but on the read side done locklessly using a
- * sequence-lock on the vblank counter. Ensure correct ordering using
- * memory barrriers. We need the barrier both before and also after the
- * counter update to synchronize with the next timestamp write.
- * The read-side barriers for this are in drm_vblank_count_and_time.
- */
-smp_wmb();
+write_seqlock(&vblank->seqlock);
+vblank->time = *t_vblank;
 vblank->count += vblank_count_inc;
-smp_wmb();
+write_sequnlock(&vblank->seqlock);
 }
 /**
@@ -205,7 +187,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 const struct timeval *t_old;
 u64 diff_ns;
-t_old = &vblanktimestamp(dev, pipe, vblank->count);
+t_old = &vblank->time;
 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
 /*
@@ -239,49 +221,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
 diff = 1;
 }
-/*
- * FIMXE: Need to replace this hack with proper seqlocks.
- *
- * Restrict the bump of the software vblank counter to a safe maximum
- * value of +1 whenever there is the possibility that concurrent readers
- * of vblank timestamps could be active at the moment, as the current
- * implementation of the timestamp caching and updating is not safe
- * against concurrent readers for calls to store_vblank() with a bump
- * of anything but +1. A bump != 1 would very likely return corrupted
- * timestamps to userspace, because the same slot in the cache could
- * be concurrently written by store_vblank() and read by one of those
- * readers without the read-retry logic detecting the collision.
- *
- * Concurrent readers can exist when we are called from the
- * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
- * irq callers. However, all those calls to us are happening with the
- * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
- * can't increase while we are executing. Therefore a zero refcount at
- * this point is safe for arbitrary counter bumps if we are called
- * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
- * we must also accept a refcount of 1, as whenever we are called from
- * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
- * we must let that one pass through in order to not lose vblank counts
- * during vblank irq off - which would completely defeat the whole
- * point of this routine.
- *
- * Whenever we are called from vblank irq, we have to assume concurrent
- * readers exist or can show up any time during our execution, even if
- * the refcount is currently zero, as vblank irqs are usually only
- * enabled due to the presence of readers, and because when we are called
- * from vblank irq we can't hold the vbl_lock to protect us from sudden
- * bumps in vblank refcount. Therefore also restrict bumps to +1 when
- * called from vblank irq.
- */
-if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
-    (flags & DRM_CALLED_FROM_VBLIRQ))) {
-DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
-"refcount %u, vblirq %u\n", pipe, diff,
-atomic_read(&vblank->refcount),
-(flags & DRM_CALLED_FROM_VBLIRQ) != 0);
-diff = 1;
-}
 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
 " current=%u, diff=%u, hw=%u hw_last=%u\n",
 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -417,6 +356,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 init_waitqueue_head(&vblank->queue);
 setup_timer(&vblank->disable_timer, vblank_disable_fn,
 (unsigned long)vblank);
+seqlock_init(&vblank->seqlock);
 }
 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -986,25 +926,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 struct timeval *vblanktime)
 {
 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-int count = DRM_TIMESTAMP_MAXRETRIES;
-u32 cur_vblank;
+u32 vblank_count;
+unsigned int seq;
 if (WARN_ON(pipe >= dev->num_crtcs))
 return 0;
-/*
- * Vblank timestamps are read lockless. To ensure consistency the vblank
- * counter is rechecked and ordering is ensured using memory barriers.
- * This works like a seqlock. The write-side barriers are in store_vblank.
- */
 do {
-cur_vblank = vblank->count;
-smp_rmb();
-*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
-smp_rmb();
-} while (cur_vblank != vblank->count && --count > 0);
+seq = read_seqbegin(&vblank->seqlock);
+vblank_count = vblank->count;
+*vblanktime = vblank->time;
+} while (read_seqretry(&vblank->seqlock, seq));
-return cur_vblank;
+return vblank_count;
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);


@@ -52,6 +52,7 @@
 #include <linux/poll.h>
 #include <linux/ratelimit.h>
 #include <linux/sched.h>
+#include <linux/seqlock.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -392,11 +393,6 @@ struct drm_master {
 void *driver_priv;
 };
-/* Size of ringbuffer for vblank timestamps. Just double-buffer
- * in initial implementation.
- */
-#define DRM_VBLANKTIME_RBSIZE 2
 /* Flags and return codes for get_vblank_timestamp() driver function. */
 #define DRM_CALLED_FROM_VBLIRQ 1
 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -725,10 +721,10 @@ struct drm_vblank_crtc {
 wait_queue_head_t queue; /**< VBLANK wait queue */
 struct timer_list disable_timer; /* delayed disable timer */
-/* vblank counter, protected by dev->vblank_time_lock for writes */
-u32 count;
-/* vblank timestamps, protected by dev->vblank_time_lock for writes */
-struct timeval time[DRM_VBLANKTIME_RBSIZE];
+seqlock_t seqlock; /* protects vblank count and time */
+u32 count; /* vblank counter */
+struct timeval time; /* vblank timestamp */
 atomic_t refcount; /* number of users of vblank interruptsper crtc */
 u32 last; /* protected by dev->vbl_lock, used */