
[2/6] perf cs-etm: Split setup and timestamp search functions

Message ID 20210713154008.29656-3-james.clark@arm.com (mailing list archive)
State New, archived
Series perf cs-etm: Support TRBE (unformatted decoding)

Commit Message

James Clark July 13, 2021, 3:40 p.m. UTC
This refactoring has some benefits:
 * Decoding is done to find the timestamp. If we want to print errors
   when maps aren't available, then doing that decoding from
   cs_etm__setup_queue() may cause warnings to be printed.
 * The cs_etm__setup_queue() flow is shared between timed and timeless
   modes, so the timestamp search had to be guarded by an if statement,
   which can now be removed.
 * Allows moving the setup queues function earlier.
 * If data was piped in, not all queues would be filled at setup time,
   so the timestamp search wouldn't have worked properly anyway. Now it
   runs at flush time, when data in all queues is available.

The motivation for this is to decouple the setup functions from the
ones that involve decoding. That way the setup function can be moved
earlier, to the point where the formatted/unformatted trace information
is available.
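
Roughly, the resulting split means the two halves can be driven
independently, as in the sketch below. This is illustrative only: the
wrapper function example_split_flow() does not exist in the patch, and
the call sites are simplified.

static int example_split_flow(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	/* Setup is decode-free, so it can run as soon as the queues exist. */
	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	/*
	 * The timestamp search decodes trace data, so it is deferred until
	 * all queues have been filled, e.g. at flush time for piped input.
	 */
	for (i = 0; i < etm->queues.nr_queues; i++) {
		struct cs_etm_queue *etmq = etm->queues.queue_array[i].priv;

		if (!etmq)
			continue;

		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
		if (ret)
			return ret;
	}

	return 0;
}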

Signed-off-by: James Clark <james.clark@arm.com>
---
 tools/perf/util/cs-etm.c | 41 ++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

Comments

Mathieu Poirier July 19, 2021, 7:38 p.m. UTC | #1
On Tue, Jul 13, 2021 at 04:40:04PM +0100, James Clark wrote:
> This refactoring has some benefits:
>  * Decoding is done to find the timestamp. If we want to print errors
>    when maps aren't available, then doing that decoding from
>    cs_etm__setup_queue() may cause warnings to be printed.
>  * The cs_etm__setup_queue() flow is shared between timed and timeless
>    modes, so the timestamp search had to be guarded by an if statement,
>    which can now be removed.
>  * Allows moving the setup queues function earlier.
>  * If data was piped in, not all queues would be filled at setup time,
>    so the timestamp search wouldn't have worked properly anyway. Now it
>    runs at flush time, when data in all queues is available.
> 
> The motivation for this is to decouple the setup functions from the
> ones that involve decoding. That way the setup function can be moved
> earlier, to the point where the formatted/unformatted trace information
> is available.
> 
> Signed-off-by: James Clark <james.clark@arm.com>
> ---
>  tools/perf/util/cs-etm.c | 41 ++++++++++++++++++++++++++++------------
>  1 file changed, 29 insertions(+), 12 deletions(-)

Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org>

> 
> diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
> index 4c69ef391f60..426e99c07ca9 100644
> --- a/tools/perf/util/cs-etm.c
> +++ b/tools/perf/util/cs-etm.c
> @@ -809,29 +809,32 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
>  			       struct auxtrace_queue *queue,
>  			       unsigned int queue_nr)
>  {
> -	int ret = 0;
> -	unsigned int cs_queue_nr;
> -	u8 trace_chan_id;
> -	u64 cs_timestamp;
>  	struct cs_etm_queue *etmq = queue->priv;
>  
>  	if (list_empty(&queue->head) || etmq)
> -		goto out;
> +		return 0;
>  
>  	etmq = cs_etm__alloc_queue(etm);
>  
> -	if (!etmq) {
> -		ret = -ENOMEM;
> -		goto out;
> -	}
> +	if (!etmq)
> +		return -ENOMEM;
>  
>  	queue->priv = etmq;
>  	etmq->etm = etm;
>  	etmq->queue_nr = queue_nr;
>  	etmq->offset = 0;
>  
> -	if (etm->timeless_decoding)
> -		goto out;
> +	return 0;
> +}
> +
> +static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
> +					    struct cs_etm_queue *etmq,
> +					    unsigned int queue_nr)
> +{
> +	int ret = 0;
> +	unsigned int cs_queue_nr;
> +	u8 trace_chan_id;
> +	u64 cs_timestamp;
>  
>  	/*
>  	 * We are under a CPU-wide trace scenario.  As such we need to know
> @@ -2218,13 +2221,27 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
>  static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
>  {
>  	int ret = 0;
> -	unsigned int cs_queue_nr, queue_nr;
> +	unsigned int cs_queue_nr, queue_nr, i;
>  	u8 trace_chan_id;
>  	u64 cs_timestamp;
>  	struct auxtrace_queue *queue;
>  	struct cs_etm_queue *etmq;
>  	struct cs_etm_traceid_queue *tidq;
>  
> +	/*
> +	 * Pre-populate the heap with one entry from each queue so that we can
> +	 * start processing in time order across all queues.
> +	 */
> +	for (i = 0; i < etm->queues.nr_queues; i++) {
> +		etmq = etm->queues.queue_array[i].priv;
> +		if (!etmq)
> +			continue;
> +
> +		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
> +		if (ret)
> +			return ret;
> +	}
> +
>  	while (1) {
>  		if (!etm->heap.heap_cnt)
>  			goto out;
> -- 
> 2.28.0
>

Patch

diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 4c69ef391f60..426e99c07ca9 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -809,29 +809,32 @@  static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
 			       struct auxtrace_queue *queue,
 			       unsigned int queue_nr)
 {
-	int ret = 0;
-	unsigned int cs_queue_nr;
-	u8 trace_chan_id;
-	u64 cs_timestamp;
 	struct cs_etm_queue *etmq = queue->priv;
 
 	if (list_empty(&queue->head) || etmq)
-		goto out;
+		return 0;
 
 	etmq = cs_etm__alloc_queue(etm);
 
-	if (!etmq) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!etmq)
+		return -ENOMEM;
 
 	queue->priv = etmq;
 	etmq->etm = etm;
 	etmq->queue_nr = queue_nr;
 	etmq->offset = 0;
 
-	if (etm->timeless_decoding)
-		goto out;
+	return 0;
+}
+
+static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
+					    struct cs_etm_queue *etmq,
+					    unsigned int queue_nr)
+{
+	int ret = 0;
+	unsigned int cs_queue_nr;
+	u8 trace_chan_id;
+	u64 cs_timestamp;
 
 	/*
 	 * We are under a CPU-wide trace scenario.  As such we need to know
@@ -2218,13 +2221,27 @@  static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
 static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
 {
 	int ret = 0;
-	unsigned int cs_queue_nr, queue_nr;
+	unsigned int cs_queue_nr, queue_nr, i;
 	u8 trace_chan_id;
 	u64 cs_timestamp;
 	struct auxtrace_queue *queue;
 	struct cs_etm_queue *etmq;
 	struct cs_etm_traceid_queue *tidq;
 
+	/*
+	 * Pre-populate the heap with one entry from each queue so that we can
+	 * start processing in time order across all queues.
+	 */
+	for (i = 0; i < etm->queues.nr_queues; i++) {
+		etmq = etm->queues.queue_array[i].priv;
+		if (!etmq)
+			continue;
+
+		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
+		if (ret)
+			return ret;
+	}
+
 	while (1) {
 		if (!etm->heap.heap_cnt)
 			goto out;
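
For context, the pre-population matters because cs_etm__process_queues()
drains an auxtrace min-heap keyed by timestamp, so only queues that have
already contributed an entry are ever picked for decoding. The sketch
below shows that drain pattern in simplified form (the real code also
packs a trace channel ID into the heap's queue number); example_heap_drain()
is a hypothetical name, and it assumes cs_etm__queue_first_cs_timestamp()
ends by pushing the queue's first timestamp with auxtrace_heap__add(), as
the existing code does.

static int example_heap_drain(struct cs_etm_auxtrace *etm)
{
	int ret;

	while (etm->heap.heap_cnt) {
		/* heap_array[0] is the queue with the oldest pending timestamp. */
		unsigned int cs_queue_nr = etm->heap.heap_array[0].queue_nr;
		u64 next_cs_timestamp = 0;

		auxtrace_heap__pop(&etm->heap);

		/*
		 * ... decode and synthesize samples for this queue, setting
		 * next_cs_timestamp from the trace as it goes ...
		 */

		/*
		 * If more trace remains, push the queue back so it is ordered
		 * against the other queues again.
		 */
		if (next_cs_timestamp) {
			ret = auxtrace_heap__add(&etm->heap, cs_queue_nr,
						 next_cs_timestamp);
			if (ret)
				return ret;
		}
	}

	return 0;
}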