*/
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1
+#define TRBE_NEEDS_DRAIN_AFTER_DISABLE 2
static int trbe_errata_cpucaps[] = {
[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
+ [TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
-1, /* Sentinel, must be the last entry */
};
return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
+static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
+{
+ /*
+	 * Errata affected TRBE implementations need a TSB CSYNC and a DSB
+	 * after disabling the TRBE, to prevent subsequent writes to certain
+	 * TRBE system registers from being ignored and having no effect.
+ */
+ return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
+}
+
static int trbe_alloc_node(struct perf_event *event)
{
	if (event->cpu == -1)
		return NUMA_NO_NODE;
	return cpu_to_node(event->cpu);
}
-static void trbe_drain_buffer(void)
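+/*
+ * Drain trace data held in the TRBE out to memory: TSB CSYNC completes
+ * the outstanding trace operations and the DSB ensures the resulting
+ * memory writes have completed.
+ */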
+static inline void trbe_drain_buffer(void)
{
tsb_csync();
dsb(nsh);
}
-static void trbe_drain_and_disable_local(void)
+static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
- trbe_drain_buffer();
-
/*
* Disable the TRBE without clearing LIMITPTR which
* might be required for fetching the buffer limits.
*/
trblimitr &= ~TRBLIMITR_ENABLE;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+
+ if (trbe_needs_drain_after_disable(cpudata))
+ trbe_drain_buffer();
isb();
}
-static void trbe_reset_local(void)
+static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
- trbe_drain_and_disable_local();
+ trbe_drain_buffer();
+ set_trbe_disabled(cpudata);
+}
+
+static void trbe_reset_local(struct trbe_cpudata *cpudata)
+{
+ trbe_drain_and_disable_local(cpudata);
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
* at event_stop(). So disable the TRBE here and leave
* the update_buffer() to return a 0 size.
*/
- trbe_drain_and_disable_local();
+ trbe_drain_and_disable_local(buf->cpudata);
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
perf_aux_output_end(handle, 0);
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
WARN_ON(buf->trbe_hw_base < buf->trbe_base);
WARN_ON(buf->trbe_write < buf->trbe_hw_base);
WARN_ON(buf->trbe_write >= buf->trbe_limit);
- set_trbe_disabled();
- isb();
+ set_trbe_disabled(buf->cpudata);
clr_trbe_status();
set_trbe_base_pointer(buf->trbe_hw_base);
set_trbe_write_pointer(buf->trbe_write);
* the TRBE here will ensure that no IRQ could be generated when the perf
* handle gets freed in etm_event_stop().
*/
- trbe_drain_and_disable_local();
+ trbe_drain_and_disable_local(cpudata);
/* Check if there is a pending interrupt and handle it here */
status = read_sysreg_s(SYS_TRBSR_EL1);
if (cpudata->mode != CS_MODE_PERF)
return -EINVAL;
- trbe_drain_and_disable_local();
+ trbe_drain_and_disable_local(cpudata);
buf->cpudata = NULL;
cpudata->buf = NULL;
cpudata->mode = CS_MODE_DISABLED;
* is able to detect this with a disconnected handle
* (handle->event = NULL).
*/
- trbe_drain_and_disable_local();
+ trbe_drain_and_disable_local(buf->cpudata);
*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
return -EINVAL;
}
{
struct perf_output_handle **handle_ptr = dev;
struct perf_output_handle *handle = *handle_ptr;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
enum trbe_fault_action act;
u64 status;
bool truncated = false;
* Ensure the trace is visible to the CPUs and
* any external aborts have been resolved.
*/
- trbe_drain_and_disable_local();
+ trbe_drain_and_disable_local(buf->cpudata);
clr_trbe_irq();
isb();
static void arm_trbe_enable_cpu(void *info)
{
struct trbe_drvdata *drvdata = info;
+ struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
- trbe_reset_local();
+ trbe_reset_local(cpudata);
enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
disable_percpu_irq(drvdata->irq);
- trbe_reset_local();
+ trbe_reset_local(cpudata);
if (trbe_csdev) {
coresight_unregister(trbe_csdev);
cpudata->drvdata = NULL;
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+
disable_percpu_irq(drvdata->irq);
- trbe_reset_local();
+ trbe_reset_local(cpudata);
}
return 0;
}