+static int cs_etm__run_per_cpu_timeless_decoder(struct cs_etm_queue *etmq)
+{
+	int idx, err = 0;
+	struct cs_etm_traceid_queue *tidq;
+	struct int_node *inode;
+
+	/* Go through each buffer in the queue and decode them one by one */
+	while (1) {
+		err = cs_etm__get_data_block(etmq);
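+		/* Zero means the queue is exhausted, negative means an error */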
+		if (err <= 0)
+			return err;
+
+		/* Run trace decoder until buffer consumed or end of trace */
+		do {
+			err = cs_etm__decode_data_block(etmq);
+			if (err)
+				return err;
+
+			/*
+			 * cs_etm__run_per_thread_timeless_decoder() runs on a
+			 * single traceID queue because each TID has a separate
+			 * buffer. But here in per-cpu mode we need to iterate
+			 * over each channel instead.
+			 */
+			intlist__for_each_entry(inode,
+						etmq->traceid_queues_list) {
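+				/* priv holds the index into traceid_queues[] */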
+				idx = (int)(intptr_t)inode->priv;
+				tidq = etmq->traceid_queues[idx];
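+				/*
+				 * Nothing to do if an error occurs here other
+				 * than hoping the next packet will be better.
+				 */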
+				cs_etm__process_traceid_queue(etmq, tidq);
+			}
+		} while (etmq->buf_len);
+
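+		/*
+		 * The block is consumed; unlike the per-thread decoder there
+		 * is one traceID queue per channel, so each of them needs to
+		 * be flushed.
+		 */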
+		intlist__for_each_entry(inode, etmq->traceid_queues_list) {
+			idx = (int)(intptr_t)inode->priv;
+			tidq = etmq->traceid_queues[idx];
+			/* Flush any remaining branch stack entries */
+			err = cs_etm__end_block(etmq, tidq);
+			if (err)
+				return err;
+		}
+	}
+
+	return err;
+}
+