OpenCloudOS-Kernel/drivers/pps/generators/pps_gen_parport.c


/*
* pps_gen_parport.c -- kernel parallel port PPS signal generator
*
*
* Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* TODO:
* fix issues when realtime clock is adjusted in a leap
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/parport.h>
#define DRVDESC "parallel port PPS signal generator"
#define SIGNAL 0
#define NO_SIGNAL PARPORT_CONTROL_STROBE
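/* The pulse is driven on the parallel port STROBE control line:
 * writing SIGNAL asserts the output, writing NO_SIGNAL returns it
 * to the idle level.
 */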
/* module parameters */
#define SEND_DELAY_MAX 100000
static unsigned int send_delay = 30000;
MODULE_PARM_DESC(delay,
"Delay between setting and dropping the signal (ns)");
module_param_named(delay, send_delay, uint, 0);
#define SAFETY_INTERVAL 3000 /* set the hrtimer earlier for safety (ns) */
/* internal per port structure */
struct pps_generator_pp {
struct pardevice *pardev; /* parport device */
struct hrtimer timer;
long port_write_time; /* calibrated port write time (ns) */
};
static struct pps_generator_pp device = {
.pardev = NULL,
};
static int attached;
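/* the driver handles at most one port at a time */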
/* calibrated time between a hrtimer event and the reaction */
static long hrtimer_error = SAFETY_INTERVAL;
/* the kernel hrtimer event */
static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
{
struct timespec64 expire_time, ts1, ts2, ts3, dts;
struct pps_generator_pp *dev;
struct parport *port;
long lim, delta;
unsigned long flags;
/* We have to disable interrupts here. The idea is to prevent
 * other interrupts on the same processor from introducing random
* lags while polling the clock. ktime_get_real_ts64() takes <1us on
* most machines while other interrupt handlers can take much
* more potentially.
*
* NB: approx time with blocked interrupts =
* send_delay + 3 * SAFETY_INTERVAL
*/
local_irq_save(flags);
/* first of all we get the time stamp... */
ktime_get_real_ts64(&ts1);
expire_time = ktime_to_timespec64(hrtimer_get_softexpires(timer));
dev = container_of(timer, struct pps_generator_pp, timer);
lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
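/* lim is the nanosecond offset within the current second at which
 * the assert write must start: the write itself takes about
 * port_write_time ns and the signal is then held for send_delay ns,
 * so the clear edge lands on the second boundary.
 */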
/* check if we are late */
if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
local_irq_restore(flags);
pr_err("we are late this time %lld.%09ld\n",
(s64)ts1.tv_sec, ts1.tv_nsec);
goto done;
}
/* busy loop until the time is right for an assert edge */
do {
ktime_get_real_ts64(&ts2);
} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
/* set the signal */
port = dev->pardev->port;
port->ops->write_control(port, SIGNAL);
/* busy loop until the time is right for a clear edge */
lim = NSEC_PER_SEC - dev->port_write_time;
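/* start the clear write port_write_time ns early so that it
 * completes right at the second boundary
 */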
do {
ktime_get_real_ts64(&ts2);
} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
/* unset the signal */
port->ops->write_control(port, NO_SIGNAL);
ktime_get_real_ts64(&ts3);
local_irq_restore(flags);
/* update calibrated port write time */
dts = timespec64_sub(ts3, ts2);
dev->port_write_time =
(dev->port_write_time + timespec64_to_ns(&dts)) >> 1;
done:
/* update calibrated hrtimer error */
dts = timespec64_sub(ts1, expire_time);
delta = timespec64_to_ns(&dts);
/* If the new error value is bigger than the old one, use the new
 * value; if not, slowly move towards it. This way it should be
 * safe in bad conditions and efficient in good conditions.
*/
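/* the else branch is an exponentially weighted average:
 * 3/4 of the old estimate plus 1/4 of the new sample
 */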
if (delta >= hrtimer_error)
hrtimer_error = delta;
else
hrtimer_error = (3 * hrtimer_error + delta) >> 2;
/* update the hrtimer expire time */
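/* i.e. fire send_delay + port_write_time + SAFETY_INTERVAL +
 * 2 * hrtimer_error ns before the next second boundary, which gives
 * the busy-wait loops above enough margin to hit both edges
 */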
hrtimer_set_expires(timer,
ktime_set(expire_time.tv_sec + 1,
NSEC_PER_SEC - (send_delay +
dev->port_write_time + SAFETY_INTERVAL +
2 * hrtimer_error)));
return HRTIMER_RESTART;
}
/* calibrate port write time */
#define PORT_NTESTS_SHIFT 5
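/* average the cost of a write_control() call over
 * 1 << PORT_NTESTS_SHIFT (i.e. 32) timed runs
 */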
static void calibrate_port(struct pps_generator_pp *dev)
{
struct parport *port = dev->pardev->port;
int i;
long acc = 0;
for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
struct timespec64 a, b;
unsigned long irq_flags;
local_irq_save(irq_flags);
ktime_get_real_ts64(&a);
port->ops->write_control(port, NO_SIGNAL);
ktime_get_real_ts64(&b);
local_irq_restore(irq_flags);
b = timespec64_sub(b, a);
acc += timespec64_to_ns(&b);
}
dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
pr_info("port write takes %ldns\n", dev->port_write_time);
}
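/* first timer expiry: shortly before the upcoming second boundary;
 * if less than ~10 ms of the current second remain, target the
 * following second instead
 */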
static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
{
struct timespec64 ts;
ktime_get_real_ts64(&ts);
return ktime_set(ts.tv_sec +
((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
NSEC_PER_SEC - (send_delay +
dev->port_write_time + 3 * SAFETY_INTERVAL));
}
static void parport_attach(struct parport *port)
{
struct pardev_cb pps_cb;
if (attached) {
/* we already have a port */
return;
}
memset(&pps_cb, 0, sizeof(pps_cb));
pps_cb.private = &device;
pps_cb.flags = PARPORT_FLAG_EXCL;
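/* request exclusive access to the port */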
device.pardev = parport_register_dev_model(port, KBUILD_MODNAME,
&pps_cb, 0);
if (!device.pardev) {
pr_err("couldn't register with %s\n", port->name);
return;
}
if (parport_claim_or_block(device.pardev) < 0) {
pr_err("couldn't claim %s\n", port->name);
goto err_unregister_dev;
}
pr_info("attached to %s\n", port->name);
attached = 1;
calibrate_port(&device);
hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
device.timer.function = hrtimer_event;
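/* the handler re-arms itself by returning HRTIMER_RESTART, so a
 * single hrtimer_start() is enough
 */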
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
err_unregister_dev:
parport_unregister_device(device.pardev);
}
static void parport_detach(struct parport *port)
{
if (port->cad != device.pardev)
return; /* not our port */
hrtimer_cancel(&device.timer);
parport_release(device.pardev);
parport_unregister_device(device.pardev);
}
static struct parport_driver pps_gen_parport_driver = {
.name = KBUILD_MODNAME,
.match_port = parport_attach,
.detach = parport_detach,
.devmodel = true,
};
/* module stuff */
static int __init pps_gen_parport_init(void)
{
int ret;
pr_info(DRVDESC "\n");
if (send_delay > SEND_DELAY_MAX) {
pr_err("delay value should be not greater"
" then %d\n", SEND_DELAY_MAX);
return -EINVAL;
}
ret = parport_register_driver(&pps_gen_parport_driver);
if (ret) {
pr_err("unable to register with parport\n");
return ret;
}
return 0;
}
static void __exit pps_gen_parport_exit(void)
{
parport_unregister_driver(&pps_gen_parport_driver);
pr_info("hrtimer avg error is %ldns\n", hrtimer_error);
}
module_init(pps_gen_parport_init);
module_exit(pps_gen_parport_exit);
MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
MODULE_DESCRIPTION(DRVDESC);
MODULE_LICENSE("GPL");