2018-05-10 02:06:04 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-05-04 01:33:50 +08:00
|
|
|
/*
|
|
|
|
* Copyright(C) 2016 Linaro Limited. All rights reserved.
|
|
|
|
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/coresight.h>
|
2016-05-04 01:33:52 +08:00
|
|
|
#include <linux/dma-mapping.h>
|
2016-05-04 01:33:50 +08:00
|
|
|
#include "coresight-priv.h"
|
|
|
|
#include "coresight-tmc.h"
|
|
|
|
|
2016-09-09 06:50:39 +08:00
|
|
|
/*
 * tmc_etr_enable_hw - program and start the TMC-ETR for a trace session.
 *
 * Zeroes the trace buffer, programs buffer size/address and AXI
 * attributes, configures the formatter and finally sets TraceCaptEn
 * via tmc_enable_hw().  All callers in this file invoke this with
 * drvdata->spinlock held and drvdata->vaddr/paddr already allocated.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	/* Unlock the coresight registers before programming them. */
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ is expressed in 32-bit words, hence size / 4. */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* Read-modify-write the AXI control register. */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	/* Only touch the read-cache attributes if this ETR supports them. */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	/* Formatter/flush control: enable formatting and trigger handling. */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
|
|
|
|
|
|
|
|
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
|
|
|
|
{
|
2017-08-03 00:21:57 +08:00
|
|
|
const u32 *barrier;
|
2017-08-03 00:22:07 +08:00
|
|
|
u32 val;
|
2017-08-03 00:21:57 +08:00
|
|
|
u32 *temp;
|
2017-08-03 00:22:07 +08:00
|
|
|
u64 rwp;
|
2016-05-04 01:33:50 +08:00
|
|
|
|
2017-08-03 00:22:07 +08:00
|
|
|
rwp = tmc_read_rwp(drvdata);
|
2016-05-04 01:33:50 +08:00
|
|
|
val = readl_relaxed(drvdata->base + TMC_STS);
|
|
|
|
|
2016-08-26 05:18:57 +08:00
|
|
|
/*
|
|
|
|
* Adjust the buffer to point to the beginning of the trace data
|
|
|
|
* and update the available trace data.
|
|
|
|
*/
|
2016-08-26 05:18:59 +08:00
|
|
|
if (val & TMC_STS_FULL) {
|
2016-05-04 01:33:50 +08:00
|
|
|
drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
|
2016-08-26 05:18:57 +08:00
|
|
|
drvdata->len = drvdata->size;
|
2017-08-03 00:21:57 +08:00
|
|
|
|
|
|
|
barrier = barrier_pkt;
|
|
|
|
temp = (u32 *)drvdata->buf;
|
|
|
|
|
|
|
|
while (*barrier) {
|
|
|
|
*temp = *barrier;
|
|
|
|
temp++;
|
|
|
|
barrier++;
|
|
|
|
}
|
|
|
|
|
2016-08-26 05:18:57 +08:00
|
|
|
} else {
|
2016-05-04 01:33:50 +08:00
|
|
|
drvdata->buf = drvdata->vaddr;
|
2016-08-26 05:18:57 +08:00
|
|
|
drvdata->len = rwp - drvdata->paddr;
|
|
|
|
}
|
2016-05-04 01:33:50 +08:00
|
|
|
}
|
|
|
|
|
2016-05-04 01:33:51 +08:00
|
|
|
/*
 * tmc_etr_disable_hw - stop the TMC-ETR and harvest trace data if needed.
 *
 * Flushes and stops trace capture, then - in sysFS mode only - snapshots
 * the buffer state via tmc_etr_dump_hw() before clearing TraceCaptEn.
 * The ordering matters: the dump must happen after the flush completes
 * and before the TMC is disabled.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
|
|
|
|
|
2016-11-30 00:47:16 +08:00
|
|
|
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR as a sink from sysFS.
 *
 * Allocates the DMA trace buffer on first use (dropping the spinlock to
 * do so, since coherent allocation may sleep), then enables the hardware
 * unless the sink is already active in sysFS mode.  Returns 0 on success,
 * -ENOMEM on allocation failure or -EBUSY while a buffer read is in
 * progress.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;		/* true once vaddr is handed to drvdata */
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::vaddr == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->vaddr == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
|
|
|
|
|
2016-11-30 00:47:16 +08:00
|
|
|
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
|
2016-05-04 01:33:56 +08:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
unsigned long flags;
|
|
|
|
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&drvdata->spinlock, flags);
|
|
|
|
if (drvdata->reading) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In Perf mode there can be only one writer per sink. There
|
|
|
|
* is also no need to continue if the ETR is already operated
|
|
|
|
* from sysFS.
|
|
|
|
*/
|
2016-11-30 00:47:15 +08:00
|
|
|
if (drvdata->mode != CS_MODE_DISABLED) {
|
2016-05-04 01:33:56 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2016-11-30 00:47:15 +08:00
|
|
|
drvdata->mode = CS_MODE_PERF;
|
2016-05-04 01:33:56 +08:00
|
|
|
tmc_etr_enable_hw(drvdata);
|
|
|
|
out:
|
|
|
|
spin_unlock_irqrestore(&drvdata->spinlock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * tmc_enable_etr_sink - dispatch sink enable to the sysFS or Perf path.
 */
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	if (mode == CS_MODE_SYSFS)
		return tmc_enable_etr_sink_sysfs(csdev);
	if (mode == CS_MODE_PERF)
		return tmc_enable_etr_sink_perf(csdev);

	/* We shouldn't be here */
	return -EINVAL;
}
|
|
|
|
|
2016-05-04 01:33:50 +08:00
|
|
|
/*
 * tmc_disable_etr_sink - disable the ETR sink if it is currently active.
 *
 * Bails out silently (no dev_info) while a buffer read is in progress;
 * otherwise stops the hardware and marks the sink disabled.
 */
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
|
|
|
|
|
|
|
|
/* Sink operations exposed to the coresight core for the ETR. */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};
|
|
|
|
|
|
|
|
/* Top-level coresight operations for the TMC-ETR configuration. */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
|
2016-05-04 01:33:51 +08:00
|
|
|
|
|
|
|
/*
 * tmc_read_prepare_etr - prepare the trace buffer for a sysFS read.
 *
 * Stops the hardware (sysFS mode only) and flags a read in progress.
 * Returns 0 on success, -EBUSY if a read is already in progress, or
 * -EINVAL if the device is not an ETR, is operated from Perf, or the
 * buffer has already been read.
 */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * tmc_read_unprepare_etr - finish a sysFS read of the trace buffer.
 *
 * If the sink is still enabled from sysFS, tracing resumes with the
 * same buffer; otherwise ownership of the DMA buffer is taken from
 * drvdata and it is freed once the spinlock is dropped.  Returns 0,
 * or -EINVAL if the device is not an ETR.
 */
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;	/* non-NULL => buffer must be freed below */

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it.  Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory out side of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}
|