// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Precision Time Measurement
 * Copyright (c) 2016, Intel Corporation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "../pci.h"
|
|
|
|
|
|
|
|
static void pci_ptm_info(struct pci_dev *dev)
|
|
|
|
{
|
2016-06-13 05:26:40 +08:00
|
|
|
char clock_desc[8];
|
|
|
|
|
|
|
|
switch (dev->ptm_granularity) {
|
|
|
|
case 0:
|
|
|
|
snprintf(clock_desc, sizeof(clock_desc), "unknown");
|
|
|
|
break;
|
|
|
|
case 255:
|
|
|
|
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
|
|
|
|
break;
|
|
|
|
default:
|
2019-11-07 05:30:48 +08:00
|
|
|
snprintf(clock_desc, sizeof(clock_desc), "%uns",
|
2016-06-13 05:26:40 +08:00
|
|
|
dev->ptm_granularity);
|
|
|
|
break;
|
|
|
|
}
|
2018-01-19 02:55:24 +08:00
|
|
|
pci_info(dev, "PTM enabled%s, %s granularity\n",
|
2016-06-13 05:26:40 +08:00
|
|
|
dev->ptm_root ? " (root)" : "", clock_desc);
|
2016-06-12 03:13:38 +08:00
|
|
|
}
|
|
|
|
|
2020-12-08 06:39:51 +08:00
|
|
|
void pci_disable_ptm(struct pci_dev *dev)
|
|
|
|
{
|
2022-09-10 04:24:57 +08:00
|
|
|
u16 ptm = dev->ptm_cap;
|
2020-12-08 06:39:51 +08:00
|
|
|
u16 ctrl;
|
|
|
|
|
|
|
|
if (!ptm)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
|
|
|
|
ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
|
|
|
|
pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
|
|
|
|
}
|
|
|
|
|
2020-12-08 06:39:50 +08:00
|
|
|
void pci_save_ptm_state(struct pci_dev *dev)
|
|
|
|
{
|
2022-09-10 04:24:57 +08:00
|
|
|
u16 ptm = dev->ptm_cap;
|
2020-12-08 06:39:50 +08:00
|
|
|
struct pci_cap_saved_state *save_state;
|
|
|
|
u16 *cap;
|
|
|
|
|
|
|
|
if (!ptm)
|
|
|
|
return;
|
|
|
|
|
|
|
|
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
2021-08-12 02:59:55 +08:00
|
|
|
if (!save_state)
|
2020-12-08 06:39:50 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
cap = (u16 *)&save_state->cap.data[0];
|
|
|
|
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pci_restore_ptm_state(struct pci_dev *dev)
|
|
|
|
{
|
2022-09-10 04:24:57 +08:00
|
|
|
u16 ptm = dev->ptm_cap;
|
2020-12-08 06:39:50 +08:00
|
|
|
struct pci_cap_saved_state *save_state;
|
|
|
|
u16 *cap;
|
|
|
|
|
2022-09-10 04:24:57 +08:00
|
|
|
if (!ptm)
|
2020-12-08 06:39:50 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
2022-09-10 04:24:57 +08:00
|
|
|
if (!save_state)
|
2020-12-08 06:39:50 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
cap = (u16 *)&save_state->cap.data[0];
|
|
|
|
pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
|
|
|
|
}
|
|
|
|
|
2022-09-10 04:24:58 +08:00
|
|
|
/*
|
|
|
|
* If the next upstream device supports PTM, return it; otherwise return
|
|
|
|
* NULL. PTM Messages are local, so both link partners must support it.
|
|
|
|
*/
|
|
|
|
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct pci_dev *ups = pci_upstream_bridge(dev);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Switch Downstream Ports are not permitted to have a PTM
|
|
|
|
* capability; their PTM behavior is controlled by the Upstream
|
|
|
|
* Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
|
|
|
|
* Switch Downstream Port, look up one more level.
|
|
|
|
*/
|
|
|
|
if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
|
|
|
|
ups = pci_upstream_bridge(ups);
|
|
|
|
|
|
|
|
if (ups && ups->ptm_cap)
|
|
|
|
return ups;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-06-12 03:13:38 +08:00
|
|
|
/*
 * pci_ptm_init - Set up PTM on interior PCIe devices at enumeration time
 *
 * Caches the PTM extended-capability offset in dev->ptm_cap, allocates a
 * save/restore buffer for the PTM Control register, and enables PTM on
 * Root Ports and Switch Ports.  Endpoints are skipped here; they enable
 * PTM on demand via pci_enable_ptm().
 */
void pci_ptm_init(struct pci_dev *dev)
{
	u16 ptm;
	u32 cap, ctrl;
	u8 local_clock;
	struct pci_dev *ups;

	if (!pci_is_pcie(dev))
		return;

	/*
	 * Enable PTM only on interior devices (root ports, switch ports,
	 * etc.) on the assumption that it causes no link traffic until an
	 * endpoint enables it.
	 */
	if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
	     pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
		return;

	ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!ptm)
		return;

	dev->ptm_cap = ptm;
	/* Room to save/restore the low 16 bits of the Control register */
	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));

	pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
	/* Local Clock Granularity field, bits 15:8 of the Capability reg */
	local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;

	/*
	 * There's no point in enabling PTM unless it's enabled in the
	 * upstream device or this device can be a PTM Root itself. Per
	 * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
	 * furthest upstream Time Source as the PTM Root.
	 */
	ups = pci_upstream_ptm(dev);
	if (ups && ups->ptm_enabled) {
		ctrl = PCI_PTM_CTRL_ENABLE;
		if (ups->ptm_granularity == 0)
			/* upstream clock quality unknown: propagate that */
			dev->ptm_granularity = 0;
		else if (ups->ptm_granularity > local_clock)
			/* the coarser clock in the chain dominates */
			dev->ptm_granularity = ups->ptm_granularity;
	} else {
		if (cap & PCI_PTM_CAP_ROOT) {
			ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
			dev->ptm_root = 1;
			dev->ptm_granularity = local_clock;
		} else
			return;
	}

	/* Effective Granularity lives in bits 15:8 of the Control register */
	ctrl |= dev->ptm_granularity << 8;
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
	dev->ptm_enabled = 1;

	pci_ptm_info(dev);
}
|
2016-06-14 00:01:51 +08:00
|
|
|
|
|
|
|
/**
 * pci_enable_ptm() - Enable Precision Time Measurement on @dev
 * @dev: PCIe Endpoint or Root Complex Integrated Endpoint
 * @granularity: if non-NULL, filled in with the effective clock
 *		 granularity in ns (0 means unknown)
 *
 * Return: 0 on success, -EINVAL if the device cannot use PTM (not PCIe,
 * no PTM capability, not a Requester, wrong device type, or no
 * PTM-enabled upstream time source).
 */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
	struct pci_dev *ups;
	u32 cap, ctrl;
	u16 pos;

	if (!pci_is_pcie(dev))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!pos)
		return -EINVAL;

	dev->ptm_cap = pos;
	pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
	if (!(cap & PCI_PTM_CAP_REQ))
		return -EINVAL;

	/*
	 * For a PCIe Endpoint, PTM is only useful if the endpoint can
	 * issue PTM requests to upstream devices that have PTM enabled.
	 *
	 * For Root Complex Integrated Endpoints, there is no upstream
	 * device, so there must be some implementation-specific way to
	 * associate the endpoint with a time source.
	 */
	switch (pci_pcie_type(dev)) {
	case PCI_EXP_TYPE_ENDPOINT:
		ups = pci_upstream_ptm(dev);
		if (!ups || !ups->ptm_enabled)
			return -EINVAL;

		dev->ptm_granularity = ups->ptm_granularity;
		break;
	case PCI_EXP_TYPE_RC_END:
		dev->ptm_granularity = 0;
		break;
	default:
		return -EINVAL;
	}

	ctrl = PCI_PTM_CTRL_ENABLE | (dev->ptm_granularity << 8);
	pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
	dev->ptm_enabled = 1;

	pci_ptm_info(dev);

	if (granularity)
		*granularity = dev->ptm_granularity;
	return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
|
2021-07-27 11:36:55 +08:00
|
|
|
|
|
|
|
bool pcie_ptm_enabled(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
if (!dev)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return dev->ptm_enabled;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(pcie_ptm_enabled);
|