// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE 0x00002000
#define FEC_T_CTRL_CAPTURE 0x00000800
#define FEC_T_CTRL_RESTART 0x00000200
#define FEC_T_CTRL_PERIOD_RST 0x00000030
#define FEC_T_CTRL_PERIOD_EN 0x00000010
#define FEC_T_CTRL_ENABLE 0x00000001

#define FEC_T_INC_MASK 0x0000007f
#define FEC_T_INC_OFFSET 0
#define FEC_T_INC_CORR_MASK 0x00007f00
#define FEC_T_INC_CORR_OFFSET 8

#define FEC_T_CTRL_PINPER 0x00000080
#define FEC_T_TF0_MASK 0x00000001
#define FEC_T_TF0_OFFSET 0
#define FEC_T_TF1_MASK 0x00000002
#define FEC_T_TF1_OFFSET 1
#define FEC_T_TF2_MASK 0x00000004
#define FEC_T_TF2_OFFSET 2
#define FEC_T_TF3_MASK 0x00000008
#define FEC_T_TF3_OFFSET 3
#define FEC_T_TDRE_MASK 0x00000001
#define FEC_T_TDRE_OFFSET 0
#define FEC_T_TMODE_MASK 0x0000003C
#define FEC_T_TMODE_OFFSET 2
#define FEC_T_TIE_MASK 0x00000040
#define FEC_T_TIE_OFFSET 6
#define FEC_T_TF_MASK 0x00000080
#define FEC_T_TF_OFFSET 7

#define FEC_ATIME_CTRL 0x400
#define FEC_ATIME 0x404
#define FEC_ATIME_EVT_OFFSET 0x408
#define FEC_ATIME_EVT_PERIOD 0x40c
#define FEC_ATIME_CORR 0x410
#define FEC_ATIME_INC 0x414
#define FEC_TS_TIMESTAMP 0x418

#define FEC_TGSR 0x604
#define FEC_TCSR(n) (0x608 + n * 0x08)
#define FEC_TCCR(n) (0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL 3
#define FEC_TMODE_TOGGLE 0x05
#define FEC_HIGH_PULSE 0x0F

#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
#define FEC_CHANNLE_0 0
#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
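
/* Note (editor's summary of the values above): FEC_COUNTER_PERIOD programs
 * the free-running timer to wrap at 2^31 nanoseconds (about 2.15 s), which is
 * why the cyclecounter mask used later is CLOCKSOURCE_MASK(31) and why the
 * one-second PPS reload period always fits inside a single wrap.
 */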

/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;
	val = 0;

	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
		dev_err(&fep->pdev->dev, "No ptp stack is running\n");
		return -EINVAL;
	}

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* Clear capture or output compare interrupt status if any. */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double check that the TMODE field in
		 * the TCSR register is cleared before the first compare
		 * counter is written into the TCCR register. Just add a
		 * double check here.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to update the timecounter */
		timecounter_read(&fep->tc);
		/* We want to place the first compare event on the next second
		 * boundary. So we need to know what the ptp time is now and
		 * how many nanoseconds remain until the next second. The
		 * remaining nanoseconds before the next second are
		 * NSEC_PER_SEC - ts.tv_nsec; adding them to the current timer
		 * value lands exactly on the next second.
		 */
		tempval = readl(fep->hwp + FEC_ATIME_CTRL);
		tempval |= FEC_T_CTRL_CAPTURE;
		writel(tempval, fep->hwp + FEC_ATIME_CTRL);

		tempval = readl(fep->hwp + FEC_ATIME);
		/* Convert the ptp local counter to a 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* tempval is less than 3 seconds, so val is less than
		 * 4 seconds. No overflow in the 32-bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Need to consider the situation where the current time is
		 * very close to the second boundary, which means
		 * NSEC_PER_SEC - ts.tv_nsec is close to zero (for example
		 * 20 ns). Since the timer is still running while we calculate
		 * the first compare event, the remaining nanoseconds may run
		 * out before the compare counter is calculated and written
		 * into the TCCR register. To avoid this, set the compare
		 * event to the second after next. The current setting is a
		 * 31-bit timer that wraps in just over 2 seconds, so it is
		 * fine to target the second after next.
		 */
		val += NSEC_PER_SEC;
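
		/* Illustrative example with hypothetical numbers: if the
		 * captured counter tempval is 100000020 and ts.tv_nsec is
		 * 999999980, then NSEC_PER_SEC - ts.tv_nsec = 20 and
		 * val = 100000040, only 20 ns ahead of the running timer;
		 * adding NSEC_PER_SEC pushes the first compare event a full
		 * second further out so it cannot be missed.
		 */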

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since
		 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * the wrap cannot cause a problem; an offset bigger than
		 * fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer to the TCCR register details
		 * in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * this function reads the cyclecounter registers and is called by the
 * cyclecounter structure used to construct a ns counter from the
 * arbitrary fixed point registers
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	const struct platform_device_id *id_entry =
		platform_get_device_id(fep->pdev);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);

	if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * this function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;
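
	/* Worked example with a hypothetical 125 MHz ptp clock:
	 * cycle_speed = 125000000 gives inc = 8, so the hardware adds 8 ns to
	 * the counter on every clock edge and the counter always counts
	 * wall-clock nanoseconds regardless of the actual clock rate.
	 */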

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
	       fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;
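	/* With mult = 2^31 and shift = 31 the cyclecounter conversion is
	 * ns = (cycles * mult) >> shift = cycles, i.e. a 1:1 mapping, since
	 * the hardware counter above is already programmed to count in
	 * nanoseconds.
	 */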

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to do it.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
	 * try to find a corr_inc between 1 and fep->ptp_inc that
	 * meets the adjustment requirement.
	 */
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Set it to the highest value: double-speed
	 * correction on every clock step.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}
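
	/* Worked example with hypothetical numbers: for ppb = 1000 and
	 * ptp_inc = 20 ns, rhs = 20000 and lhs = NSEC_PER_SEC already
	 * satisfies lhs >= rhs, so corr_inc = 1 and
	 * corr_period = 1000000000 / 20000 = 50000. Correcting by 1 ns every
	 * 50000 increments of 20 ns is 1 / (50000 * 20) = 1e-6 of the rate,
	 * i.e. the requested 1000 ppb.
	 */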

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * adjust the timer by resetting the timecounter structure.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * read the timecounter and return the current time in ns,
 * after converting it into a struct timespec64.
 */
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct fec_enet_private *adapter =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_read(&adapter->tc);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	mutex_lock(&fep->ptp_clk_mutex);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		mutex_unlock(&fep->ptp_clk_mutex);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	mutex_unlock(&fep->ptp_clk_mutex);
	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
		container_of(ptp, struct fec_enet_private, ptp_caps);
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	}
	return -EOPNOTSUPP;
}
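
/* Userspace sketch (not part of this driver; the device path is a
 * placeholder): the PTP_CLK_REQ_PPS request handled above is normally
 * triggered through the PTP_ENABLE_PPS ioctl on the PTP character device
 * registered by fec_ptp_init(), roughly:
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	ioctl(fd, PTP_ENABLE_PPS, 1);	// from <linux/ptp_clock.h>
 *
 * Enabling PPS also requires CAP_SYS_TIME in the caller.
 */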

int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (fep->hwts_rx_en)
			fep->hwts_rx_en = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	default:
		fep->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
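
/* Userspace sketch (not part of this driver; "eth0" is a placeholder
 * interface name): fec_ptp_set() is reached through the SIOCSHWTSTAMP ioctl,
 * using the definitions from <linux/net_tstamp.h> and <linux/sockios.h>, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);	// sock: any AF_INET socket
 */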

int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (fep->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

/**
 * fec_time_keep - call timecounter_read() every second to avoid timer overrun
 * @work: the delayed work struct
 *
 * The ENET block only supports a 32-bit counter, which overflows in about
 * 4 seconds, so the timecounter must be read periodically to keep its delta
 * well inside the counter's wrap period.
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	u64 ns;
	unsigned long flags;

	mutex_lock(&fep->ptp_clk_mutex);
	if (fep->ptp_clk_on) {
		spin_lock_irqsave(&fep->tmreg_lock, flags);
		ns = timecounter_read(&fep->tc);
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	}
	mutex_unlock(&fep->ptp_clk_mutex);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the next-next compare value (not the next one,
		 * per the spec) to the register.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the counter */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				    fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: the platform device of the FEC adapter
 * @irq_idx: interrupt index to fall back to for PPS events when no dedicated
 *	     "pps" interrupt is described for the device
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */

void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	snprintf(fep->ptp_caps.name, 16, "fec ptp");

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 0;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;

	spin_lock_init(&fep->tmreg_lock);

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal;
	 * only the PTP_CLOCK_PPS clock events will be missing.
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}