2018-05-10 02:06:04 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-05-04 01:33:50 +08:00
|
|
|
/*
|
|
|
|
* Copyright(C) 2016 Linaro Limited. All rights reserved.
|
|
|
|
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
|
|
|
|
*/
|
|
|
|
|
2019-04-26 03:52:56 +08:00
|
|
|
#include <linux/atomic.h>
|
2016-05-04 01:33:59 +08:00
|
|
|
#include <linux/circ_buf.h>
|
2016-05-04 01:33:50 +08:00
|
|
|
#include <linux/coresight.h>
|
2016-05-04 01:33:59 +08:00
|
|
|
#include <linux/perf_event.h>
|
2016-05-04 01:33:52 +08:00
|
|
|
#include <linux/slab.h>
|
2016-05-04 01:33:50 +08:00
|
|
|
#include "coresight-priv.h"
|
|
|
|
#include "coresight-tmc.h"
|
2018-09-21 03:17:56 +08:00
|
|
|
#include "coresight-etm-perf.h"
|
|
|
|
|
|
|
|
static int tmc_set_etf_buffer(struct coresight_device *csdev,
|
|
|
|
struct perf_output_handle *handle);
|
2016-05-04 01:33:50 +08:00
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
/*
 * __tmc_etb_enable_hw - program the TMC as a circular trace buffer (ETB mode).
 *
 * Caller is expected to have claimed the device already; this routine only
 * performs the register sequence.  The order matters: the TMC must report
 * ready before MODE/FFCR/TRG are programmed and the capture enabled.
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Enable formatting, triggers and flush-on-trigger events */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Program the trigger counter kept in software */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
|
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
|
|
|
|
{
|
2021-02-02 02:13:28 +08:00
|
|
|
int rc = coresight_claim_device(drvdata->csdev);
|
2018-09-21 03:18:16 +08:00
|
|
|
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
__tmc_etb_enable_hw(drvdata);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-04 01:33:50 +08:00
|
|
|
/*
 * tmc_etb_dump_hw - drain the TMC internal RAM into drvdata->buf.
 *
 * Reads 32-bit words from the RAM Read Data register until the hardware
 * returns its "empty" sentinel (0xFFFFFFFF), accumulating drvdata->len.
 * If the trace run wrapped around (TMC_STS_FULL set) a barrier packet is
 * inserted at the start of the buffer so decoders know data was lost.
 *
 * NOTE(review): assumes drvdata->buf is large enough for everything the
 * TMC can return (allocated from drvdata->size elsewhere) — confirm.
 */
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		/* 0xFFFFFFFF marks the end of the trace data */
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
	return;
}
|
|
|
|
|
2018-09-21 03:18:16 +08:00
|
|
|
/*
 * __tmc_etb_disable_hw - flush, optionally dump, and stop the TMC.
 *
 * The dump must happen between the flush and tmc_disable_hw(): disabling
 * the TMC first would make the buffered trace data unreadable.
 */
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
|
|
|
|
|
2018-09-21 03:18:16 +08:00
|
|
|
/* Stop the ETB and release our claim on the device (reverse of enable). */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}
|
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
/*
 * __tmc_etf_enable_hw - program the TMC as a hardware FIFO (link/ETF mode).
 *
 * In this mode the TMC forwards trace data downstream rather than
 * capturing it; only formatting and trigger insertion are enabled.
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Zero watermark: don't stall the upstream path */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
|
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
|
|
|
|
{
|
2021-02-02 02:13:28 +08:00
|
|
|
int rc = coresight_claim_device(drvdata->csdev);
|
2018-09-21 03:18:16 +08:00
|
|
|
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
2018-09-21 03:18:06 +08:00
|
|
|
__tmc_etf_enable_hw(drvdata);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-04 01:33:50 +08:00
|
|
|
/*
 * tmc_etf_disable_hw - flush, stop and disclaim the TMC in FIFO mode.
 *
 * NOTE(review): uses the _unlocked disclaim variant — presumably because
 * the caller already holds the relevant claim locking; confirm against
 * the coresight core before changing the call order.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}
|
|
|
|
|
2018-07-12 03:40:15 +08:00
|
|
|
/*
|
|
|
|
* Return the available trace data in the buffer from @pos, with
|
|
|
|
* a maximum limit of @len, updating the @bufpp on where to
|
|
|
|
* find it.
|
|
|
|
*/
|
|
|
|
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
|
|
|
|
loff_t pos, size_t len, char **bufpp)
|
|
|
|
{
|
|
|
|
ssize_t actual = len;
|
|
|
|
|
|
|
|
/* Adjust the len to available size @pos */
|
|
|
|
if (pos + actual > drvdata->len)
|
|
|
|
actual = drvdata->len - pos;
|
|
|
|
if (actual > 0)
|
|
|
|
*bufpp = drvdata->buf + pos;
|
|
|
|
return actual;
|
|
|
|
}
|
|
|
|
|
2016-11-30 00:47:16 +08:00
|
|
|
/*
 * tmc_enable_etf_sink_sysfs - enable the TMC as a sink from sysFS.
 *
 * Allocates (or reuses) the software trace buffer and turns the hardware
 * on.  Multiple sysFS users may share an already-enabled sink; each one
 * bumps the refcount.  Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;	/* true once 'buf' has been handed to drvdata */
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
|
|
|
|
|
2018-09-21 03:17:56 +08:00
|
|
|
/*
 * tmc_enable_etf_sink_perf - enable the TMC as a sink for a perf session.
 *
 * The owning pid is read from the cs_buffers allocated at setup time
 * (rather than dereferencing event->owner here, which can be NULL on the
 * sched-in path).  A sink already driven from sysFS, or by a different
 * process, is refused with -EBUSY.
 */
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		/* Another process owns this sink - refuse to share it */
		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
|
|
|
|
|
2018-09-21 03:17:56 +08:00
|
|
|
static int tmc_enable_etf_sink(struct coresight_device *csdev,
|
|
|
|
u32 mode, void *data)
|
2016-05-04 01:33:56 +08:00
|
|
|
{
|
2017-06-06 04:15:05 +08:00
|
|
|
int ret;
|
|
|
|
|
2016-05-04 01:33:56 +08:00
|
|
|
switch (mode) {
|
|
|
|
case CS_MODE_SYSFS:
|
2017-06-06 04:15:05 +08:00
|
|
|
ret = tmc_enable_etf_sink_sysfs(csdev);
|
|
|
|
break;
|
2016-05-04 01:33:56 +08:00
|
|
|
case CS_MODE_PERF:
|
2018-09-21 03:17:56 +08:00
|
|
|
ret = tmc_enable_etf_sink_perf(csdev, data);
|
2017-06-06 04:15:05 +08:00
|
|
|
break;
|
|
|
|
/* We shouldn't be here */
|
|
|
|
default:
|
|
|
|
ret = -EINVAL;
|
|
|
|
break;
|
2016-05-04 01:33:56 +08:00
|
|
|
}
|
|
|
|
|
2017-06-06 04:15:05 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2019-06-20 01:29:12 +08:00
|
|
|
dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
|
2017-06-06 04:15:05 +08:00
|
|
|
return 0;
|
2016-05-04 01:33:56 +08:00
|
|
|
}
|
|
|
|
|
2019-04-26 03:52:55 +08:00
|
|
|
/*
 * tmc_disable_etf_sink - drop one reference to the sink, tearing down the
 * hardware when the last user goes away.
 *
 * Returns -EBUSY while the buffer is being read or while other users
 * still hold references; 0 once the sink is fully disabled.
 */
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Non-zero means other users remain - keep the HW running */
	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * tmc_enable_etf_link - enable the TMC as a link (HW FIFO) in a trace path.
 *
 * The hardware is only programmed on the first user; subsequent users
 * just increment the reference count.  Fails with -EBUSY while the
 * buffer is being read.
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* First user: actually turn the hardware on */
	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Log outside the spinlock, and only on the transition */
	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * tmc_disable_etf_link - drop one link reference, disabling the hardware
 * when the last user goes away.  A read in progress defers the teardown.
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Last user out: turn the hardware off */
	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Log outside the spinlock, and only on the transition */
	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
|
|
|
|
|
2019-04-26 03:53:01 +08:00
|
|
|
/*
 * tmc_alloc_etf_buffer - allocate the perf-facing buffer descriptor.
 *
 * The owning pid is cached here, at setup time, because event->owner may
 * be NULL later on the sched-in path (see tmc_enable_etf_sink_perf()).
 * Returns the new cs_buffers or NULL on allocation failure.
 */
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	struct cs_buffers *buf;
	int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}
|
|
|
|
|
|
|
|
/* Release a cs_buffers descriptor handed out by tmc_alloc_etf_buffer(). */
static void tmc_free_etf_buffer(void *config)
{
	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(config);
}
|
|
|
|
|
|
|
|
static int tmc_set_etf_buffer(struct coresight_device *csdev,
|
2018-09-21 03:17:56 +08:00
|
|
|
struct perf_output_handle *handle)
|
2016-05-04 01:33:59 +08:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
unsigned long head;
|
2018-09-21 03:17:56 +08:00
|
|
|
struct cs_buffers *buf = etm_perf_sink_config(handle);
|
|
|
|
|
|
|
|
if (!buf)
|
|
|
|
return -EINVAL;
|
2016-05-04 01:33:59 +08:00
|
|
|
|
|
|
|
/* wrap head around to the amount of space we have */
|
|
|
|
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
|
|
|
|
|
|
|
|
/* find the page to write to */
|
|
|
|
buf->cur = head / PAGE_SIZE;
|
|
|
|
|
|
|
|
/* and offset within that page */
|
|
|
|
buf->offset = head % PAGE_SIZE;
|
|
|
|
|
|
|
|
local_set(&buf->data_size, 0);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-09-21 03:17:54 +08:00
|
|
|
/*
 * tmc_update_etf_buffer - copy captured trace from the TMC RAM into the
 * perf AUX ring buffer.
 *
 * Stops the capture, works out how much data is available (accounting for
 * buffer wrap-around), trims it to the AUX space in non-snapshot mode by
 * advancing the RAM read pointer, and copies it word by word.  When data
 * was lost a barrier packet is written over the first words so decoders
 * can resynchronise.  Returns the number of bytes copied.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being send to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/* Overwrite the leading words with a barrier packet on loss */
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of byte
	 * that were written.  User space function cs_etm_find_snapshot() will
	 * figure out how many bytes to get from the AUX buffer based on the
	 * position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}
|
|
|
|
|
2016-05-04 01:33:50 +08:00
|
|
|
/* Sink callbacks shared by the ETB and ETF configurations. */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

/* Link callbacks, used when the TMC sits mid-path as a HW FIFO. */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

/* An ETB can only act as a sink. */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

/* An ETF can act as either a sink or a link. */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};
|
2016-05-04 01:33:51 +08:00
|
|
|
|
|
|
|
/*
 * tmc_read_prepare_etb - get the buffer ready for a sysFS read.
 *
 * Pauses an active sysFS trace session (dumping the HW buffer in the
 * process) and marks the device as being read.  Fails with -EBUSY if a
 * read is already in progress, or -EINVAL if the device is driven by
 * perf, the buffer was already consumed, or the TMC is in FIFO mode.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * tmc_read_unprepare_etb - finish a sysFS read of the buffer.
 *
 * If a sysFS trace session was paused by tmc_read_prepare_etb() the
 * hardware is re-armed with a zeroed buffer; otherwise the now-consumed
 * trace buffer is released.  Returns 0, or -EINVAL if the TMC is found
 * in FIFO mode or has an unexpected config type.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}
|