Diffstat (limited to 'drivers/hwtracing/coresight/coresight-tmc-etr.c')
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c  66
1 file changed, 60 insertions(+), 6 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 3309b1344ffc..cd9ceb7c6126 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -18,6 +18,8 @@
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+#include "coresight-tmc-secure-etr.h"
+
struct etr_flat_buf {
struct device *dev;
@@ -793,10 +795,13 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
helper_ops(catu)->disable(catu, drvdata->etr_buf);
}
+extern const struct etr_buf_operations etr_secure_buf_ops;
+
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
[ETR_MODE_CATU] = NULL,
+ [ETR_MODE_SECURE] = &etr_secure_buf_ops,
};
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -822,6 +827,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
+ case ETR_MODE_SECURE:
if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
@@ -863,6 +869,12 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
etr_buf->size = size;
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_SECURE_BUFF) {
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_SECURE, drvdata,
+ etr_buf, node, pages);
+ goto done;
+ }
+
/*
* If we have to use an existing list of pages, we cannot reliably
* use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -885,6 +897,8 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
if (rc && has_catu)
rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
etr_buf, node, pages);
+
+done:
if (rc) {
kfree(etr_buf);
return ERR_PTR(rc);
@@ -956,11 +970,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
dev_dbg(&drvdata->csdev->dev,
"tmc memory error detected, truncating buffer\n");
etr_buf->len = 0;
- etr_buf->full = 0;
+ etr_buf->full = false;
return;
}
- etr_buf->full = status & TMC_STS_FULL;
+ etr_buf->full = !!(status & TMC_STS_FULL);
WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
@@ -974,15 +988,22 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_UNLOCK(drvdata->base);
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_RESET_CTL_REG)
+ tmc_disable_hw(drvdata);
+
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_BUFFSIZE_8BX)
+ writel_relaxed(etr_buf->size / 8, drvdata->base + TMC_RSZ);
+ else
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
axictl &= ~TMC_AXICTL_CLEAR_MASK;
- axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+ axictl |= TMC_AXICTL_PROT_CTL_B1;
+ axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
axictl |= TMC_AXICTL_AXCACHE_OS;
if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
@@ -1040,7 +1061,7 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = tmc_etr_enable_catu(drvdata, etr_buf);
if (rc)
return rc;
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
__tmc_etr_enable_hw(drvdata);
@@ -1134,7 +1155,7 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
/* Reset the ETR buf used by hardware */
drvdata->etr_buf = NULL;
}
@@ -1205,6 +1226,11 @@ out:
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
+ if (!ret && (drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) &&
+ (drvdata->mode != CS_MODE_READ_PREVBOOT))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_start,
+ drvdata, true);
+
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1648,6 +1674,10 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ u32 mode;
+
+ /* Cache the drvdata->mode */
+ mode = drvdata->mode;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1672,6 +1702,11 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if ((drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) &&
+ (mode == CS_MODE_SYSFS))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_cancel,
+ drvdata, true);
+
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
@@ -1697,6 +1732,20 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
+ if (drvdata->mode == CS_MODE_READ_PREVBOOT) {
+ /* Initialize drvdata for reading trace data from last boot */
+ ret = tmc_enable_etr_sink_sysfs(drvdata->csdev);
+ if (ret)
+ return ret;
+ /* Update the buffer offset, len */
+ tmc_etr_sync_sysfs_buf(drvdata);
+ return 0;
+ }
+
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH)
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_off,
+ drvdata, true);
+
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
@@ -1759,5 +1808,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (sysfs_buf)
tmc_etr_free_sysfs_buf(sysfs_buf);
+ if ((drvdata->mode == CS_MODE_SYSFS) &&
+ (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH))
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_on,
+ drvdata, true);
+
return 0;
}
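
Note: for context on what the new ETR_MODE_SECURE entry in etr_buf_ops[] has to provide, the sketch below shows the general shape of an etr_buf_operations implementation such as the etr_secure_buf_ops referenced in this patch. The callback signatures follow the mainline struct etr_buf_operations in coresight-tmc.h; everything else (struct secure_etr_buf and how the secure region is reserved and read back) is a placeholder assumption, since those details live in coresight-tmc-secure-etr.h/.c and are not part of this diff. It is a minimal sketch written against the driver's own headers, not the actual implementation.

/*
 * Hypothetical sketch only: illustrates the etr_buf_operations interface
 * that ETR_MODE_SECURE must implement. The secure_etr_buf type and the
 * way the secure region is obtained are placeholders, not this patch.
 */
struct secure_etr_buf {
	struct device	*dev;
	dma_addr_t	daddr;	/* base address handed to the TMC/secure world */
	void		*vaddr;	/* kernel alias used when reading trace data */
	size_t		size;
};

static int tmc_etr_alloc_secure_buf(struct tmc_drvdata *drvdata,
				    struct etr_buf *etr_buf, int node,
				    void **pages)
{
	struct secure_etr_buf *secure_buf;

	secure_buf = kzalloc(sizeof(*secure_buf), GFP_KERNEL);
	if (!secure_buf)
		return -ENOMEM;

	/* Reserving/mapping the secure region is platform specific. */
	secure_buf->size = etr_buf->size;
	etr_buf->mode = ETR_MODE_SECURE;
	etr_buf->private = secure_buf;
	return 0;
}

static void tmc_etr_sync_secure_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/* Translate RRP/RWP into etr_buf->offset and etr_buf->len. */
}

static ssize_t tmc_etr_get_data_secure_buf(struct etr_buf *etr_buf,
					   u64 offset, size_t len,
					   char **bufpp)
{
	struct secure_etr_buf *secure_buf = etr_buf->private;

	/* Hand back a pointer into the readable alias of the buffer. */
	*bufpp = (char *)secure_buf->vaddr + offset;
	return len;
}

static void tmc_etr_free_secure_buf(struct etr_buf *etr_buf)
{
	kfree(etr_buf->private);
}

const struct etr_buf_operations etr_secure_buf_ops = {
	.alloc		= tmc_etr_alloc_secure_buf,
	.sync		= tmc_etr_sync_secure_buf,
	.get_data	= tmc_etr_get_data_secure_buf,
	.free		= tmc_etr_free_secure_buf,
};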