@@ -490,56 +490,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	u64 systime;
	int i;

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
@@ -636,11 +586,403 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}
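
The extension trick that ice_ptp_extend_32b_ts relies on is worth spelling out: the lower 32 bits of the PHC wrap roughly every 4.29 seconds, so as long as the cached PHC time is less than about 2 seconds old, the captured low word can be re-anchored to the cached 64-bit value by comparing the delta against half the wrap period. The following is a minimal userspace sketch of that math only; it is not the driver's code, and the function name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: rebuild a full 64-bit nanosecond timestamp from the
 * 32-bit value captured by hardware, using a recently cached 64-bit PHC
 * read as the anchor. Valid while the cached read is within ~2.1 seconds
 * (half of the 32-bit wrap period) of the capture.
 */
static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_lo;

	/* A "large" delta means the capture happened before the cached
	 * read; walk backwards instead of forwards.
	 */
	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (phc_lo - in_tstamp);

	return cached_phc_time + delta;
}

int main(void)
{
	/* Cached PHC read: 10 s. Capture: 0.5 s later, low 32 bits only. */
	uint64_t cached = 10ULL * 1000000000ULL;
	uint32_t captured = (uint32_t)(cached + 500000000ULL);

	/* Prints 10500000000, the reconstructed 64-bit capture time. */
	printf("%llu\n", (unsigned long long)extend_32b_ts(cached, captured));
	return 0;
}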

/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit.
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, then re-queue the work. This
 * may cause us to effectively poll even when not strictly necessary. We do
 * this because it's possible a new timestamp was requested around the same
 * time as the interrupt. In some cases hardware might not interrupt us again
 * when the timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the work item. If a Tx thread
 * starts a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		spin_lock(&tx->lock);
		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
			pf->ptp.tx_hwtstamp_flushed++;
		}
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
	}
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->init = 0;

	kthread_cancel_work_sync(&tx->work);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->quad = port / ICE_PORTS_PER_QUAD;
	tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
	tx->len = INDEX_PER_PORT;

	return ice_ptp_alloc_tx_tracker(tx);
}
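
To make the quad partitioning above concrete, here is a small worked example. The values used for the ports-per-quad and per-port index count are assumptions for illustration only (4 ports per quad and 16 indices per port, i.e. the per-quad timestamp count divided by the ports per quad); the real ICE_PORTS_PER_QUAD and INDEX_PER_PORT macros come from the driver headers.

#include <stdio.h>

/* Illustrative values only; the real constants come from the ice headers. */
#define PORTS_PER_QUAD	4
#define IDX_PER_PORT	16	/* e.g. 64 timestamps per quad / 4 ports */

int main(void)
{
	unsigned int port;

	/* Each port owns a private window of the shared per-quad block. */
	for (port = 0; port < 8; port++)
		printf("port %u -> quad %u, indices %u..%u\n",
		       port, port / PORTS_PER_QUAD,
		       (port % PORTS_PER_QUAD) * IDX_PER_PORT,
		       (port % PORTS_PER_QUAD) * IDX_PER_PORT + IDX_PER_PORT - 1);
	return 0;
}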

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @pf: pointer to the PF struct
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Read tstamp to be able to use this register again */
		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
				    &raw_tstamp);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps which have timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}
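
The late-update accounting added above follows a common pattern: remember when the cache was last refreshed, and on the next refresh complain if more than the 2-second budget has elapsed. A minimal userspace analogue using a monotonic clock is sketched below; jiffies-based helpers such as time_is_before_jiffies() are kernel-only, so this only shows the bookkeeping, and all names are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define CACHE_BUDGET_MS 2000ULL

struct phc_cache {
	uint64_t last_update_ms;	/* monotonic ms of last refresh, 0 = never */
	unsigned long late_updates;	/* counter, like late_cached_phc_updates */
};

static uint64_t mono_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000u + (uint64_t)(ts.tv_nsec / 1000000);
}

/* Refresh the cache; warn and count if the previous copy overstayed its budget. */
static void refresh_cache(struct phc_cache *c)
{
	uint64_t now = mono_ms();

	if (c->last_update_ms && now - c->last_update_ms > CACHE_BUDGET_MS) {
		fprintf(stderr, "%llu msecs passed between updates\n",
			(unsigned long long)(now - c->last_update_ms));
		c->late_updates++;
	}
	c->last_update_ms = now;
}

int main(void)
{
	struct phc_cache cache = { 0 };

	refresh_cache(&cache);	/* first refresh never warns */
	refresh_cache(&cache);	/* immediate refresh: well inside the budget */
	printf("late updates: %lu\n", cache.late_updates);
	return 0;
}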

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It discards any outstanding Tx timestamps,
 * and updates the cached PHC time for both the PF and Rx rings. If updating
 * the PHC time cannot be done immediately, a warning message is logged and
 * the work item is scheduled.
 *
 * These steps are required in order to ensure that we do not accidentally
 * report a timestamp extended by the wrong PHC cached copy. Note that we
 * do not directly update the cached timestamp here because it is possible
 * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we
 * would have to try again. During that time window, timestamps might be
 * requested and returned with an invalid extension. Thus, on failure to
 * immediately update the cached PHC time we would need to zero the value
 * anyways. For this reason, we just zero the value immediately and queue the
 * update work item.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Flush any outstanding Tx timestamps */
	ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
}

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
@@ -1509,7 +1851,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_update_cached_phctime(pf);
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);
@@ -1588,7 +1930,7 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
		return err;
	}

	ice_ptp_update_cached_phctime(pf);
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}
@@ -1796,26 +2138,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	u64 ts_ns, cached_time;
	u32 ts_high;
	u64 ts_ns;

	/* Populate timesync data into skb */
	if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
		struct skb_shared_hwtstamps *hwtstamps;
	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return;

		/* Use ice_ptp_extend_32b_ts directly, using the ring-specific
		 * cached PHC value, rather than accessing the PF. This also
		 * allows us to simply pass the upper 32bits of nanoseconds
		 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
		 * it would just discard these bits itself.
		 */
		ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
		ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
	cached_time = READ_ONCE(rx_ring->cached_phctime);

		hwtstamps = skb_hwtstamps(skb);
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
	}
	/* Do not report a timestamp if we don't have a cached PHC time */
	if (!cached_time)
		return;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}

/**
@@ -2015,112 +2362,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
	return 0;
}

/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 4) clear the timestamp valid bit in the PHY register
 * 5) unlock the index by clearing the associated in_use bit.
 * 2) extend the 40b timestamp value to get a 64bit timestamp
 * 3) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, then re-queue the work. This
 * may cause us effectively poll even when not strictly necessary. We do this
 * because it's possible a new timestamp was requested around the same time as
 * the interrupt. In some cases hardware might not interrupt us again when the
 * timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the work item. If a Tx thread
 * starts a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);

		ice_trace(tx_tstamp_complete, skb, idx);

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
@@ -2173,167 +2414,6 @@ void ice_ptp_process_ts(struct ice_pf *pf)
	kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		spin_lock(&tx->lock);
		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
		}
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
	}
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->init = 0;

	kthread_cancel_work_sync(&tx->work);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->quad = port / ICE_PORTS_PER_QUAD;
	tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
	tx->len = INDEX_PER_PORT;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @hw: pointer to the hw struct
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
{
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Read tstamp to be able to use this register again */
		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
				    &raw_tstamp);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
@@ -2345,7 +2425,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
	ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,