
[3/3] kernel-shark-qt: Add helper function to find the next_cpu in kshark_load_data_*()

Message ID 20180703162432.480139641@goodmis.org (mailing list archive)
State Accepted, archived
Series kernel-shark-qt: Merge kshark_load_data_records/entries()

Commit Message

Steven Rostedt July 3, 2018, 4:21 p.m. UTC
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

The two functions kshark_load_data_entries() and
kshark_load_data_records() use the same logic to find the next CPU to
load from. Add a helper function pick_next_cpu() that both of them can
use, to simplify the code.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel-shark-qt/src/libkshark.c | 53 +++++++++++++++++----------------
 1 file changed, 27 insertions(+), 26 deletions(-)
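The logic the patch factors out is a k-way merge of per-CPU lists that are already sorted by timestamp: the helper scans the head of every per-CPU list and returns the CPU holding the earliest record, and the caller consumes that record and advances the list until all lists are drained. The stand-alone sketch below is editorial and not part of the patch; the struct rec type and the sample data are hypothetical stand-ins for struct pevent_record and the per-CPU rec_list arrays, and only the selection logic mirrors the pick_next_cpu() helper added here.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for struct pevent_record / struct rec_list. */
struct rec {
	uint64_t ts;		/* record timestamp */
	struct rec *next;	/* next record on the same CPU */
};

/*
 * Return the CPU whose head record has the earliest timestamp, or -1
 * when every per-CPU list is empty (same logic as the patch's helper).
 */
static int pick_next_cpu(struct rec **heads, int n_cpus)
{
	uint64_t ts = 0;
	int next_cpu = -1;
	int cpu;

	for (cpu = 0; cpu < n_cpus; ++cpu) {
		if (!heads[cpu])
			continue;

		if (!ts || heads[cpu]->ts < ts) {
			ts = heads[cpu]->ts;
			next_cpu = cpu;
		}
	}

	return next_cpu;
}

int main(void)
{
	/* Two per-CPU lists, each already sorted by timestamp. */
	struct rec c0[] = { { 10, &c0[1] }, { 40, NULL } };
	struct rec c1[] = { { 25, &c1[1] }, { 30, NULL } };
	struct rec *heads[] = { c0, c1 };
	int next_cpu;

	/* Drain the lists in global timestamp order, as the loaders do. */
	while ((next_cpu = pick_next_cpu(heads, 2)) >= 0) {
		printf("cpu %d  ts %" PRIu64 "\n", next_cpu,
		       heads[next_cpu]->ts);
		heads[next_cpu] = heads[next_cpu]->next;
	}

	return 0;
}

Compiled with any C99 compiler, this prints the four records in the order 10, 25, 30, 40, interleaving the two CPUs by timestamp.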

Comments

Yordan Karadzhov July 4, 2018, 11:59 a.m. UTC | #1
On  3.07.2018 19:21, Steven Rostedt wrote:
> From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
> 
> The two functions kshark_load_data_entries() and
> kshark_load_data_records() use the same logic to find the next CPU to
> load from. Add a helper function pick_next_cpu() that both of them can
> use, to simplify the code.
> 
> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
> ---
>   kernel-shark-qt/src/libkshark.c | 53 +++++++++++++++++----------------
>   1 file changed, 27 insertions(+), 26 deletions(-)
> 
> diff --git a/kernel-shark-qt/src/libkshark.c b/kernel-shark-qt/src/libkshark.c
> index 680949077b7f..fe8aada75149 100644
> --- a/kernel-shark-qt/src/libkshark.c
> +++ b/kernel-shark-qt/src/libkshark.c
> @@ -566,6 +566,25 @@ static size_t get_records(struct kshark_context *kshark_ctx,
>   	return -ENOMEM;
>   }
>   


Consolidating the kshark load_data*() functions will be very useful,
because we have to add at least one more function of this type: the
one used by the NumPy interface.

Please apply those patches.
Thanks!
Yordan


> +static int pick_next_cpu(struct rec_list **rec_list, int n_cpus)
> +{
> +	uint64_t ts = 0;
> +	int next_cpu = -1;
> +	int cpu;
> +
> +	for (cpu = 0; cpu < n_cpus; ++cpu) {
> +		if (!rec_list[cpu])
> +			continue;
> +
> +		if (!ts || rec_list[cpu]->rec->ts < ts) {
> +			ts = rec_list[cpu]->rec->ts;
> +			next_cpu = cpu;
> +		}
> +	}
> +
> +	return next_cpu;
> +}
> +
>   /**
>    * @brief Load the content of the trace data file into an array of
>    *	  kshark_entries. This function provides fast loading, however the
> @@ -593,9 +612,8 @@ ssize_t kshark_load_data_entries(struct kshark_context *kshark_ctx,
>   	struct rec_list **rec_list;
>   	struct rec_list *temp_rec;
>   	struct pevent_record *rec;
> -	int cpu, n_cpus, next_cpu;
>   	size_t count, total = 0;
> -	uint64_t ts;
> +	int n_cpus;
>   	int ret;
>   
>   	if (*data_rows)
> @@ -612,17 +630,9 @@ ssize_t kshark_load_data_entries(struct kshark_context *kshark_ctx,
>   	n_cpus = tracecmd_cpus(kshark_ctx->handle);
>   
>   	for (count = 0; count < total; count++) {
> -		ts = 0;
> -		next_cpu = -1;
> -		for (cpu = 0; cpu < n_cpus; ++cpu) {
> -			if (!rec_list[cpu])
> -				continue;
> -
> -			if (!ts || rec_list[cpu]->rec->ts < ts) {
> -				ts = rec_list[cpu]->rec->ts;
> -				next_cpu = cpu;
> -			}
> -		}
> +		int next_cpu;
> +
> +		next_cpu = pick_next_cpu(rec_list, n_cpus);
>   
>   		if (next_cpu >= 0) {
>   			entry = malloc(sizeof(struct kshark_entry));
> @@ -694,9 +704,8 @@ ssize_t kshark_load_data_records(struct kshark_context *kshark_ctx,
>   	struct pevent_record *rec;
>   	struct rec_list **rec_list;
>   	struct rec_list *temp_rec;
> -	int cpu, n_cpus, next_cpu;
>   	size_t count, total = 0;
> -	uint64_t ts;
> +	int n_cpus;
>   	int pid;
>   
>   	total = get_records(kshark_ctx, &rec_list);
> @@ -710,17 +719,9 @@ ssize_t kshark_load_data_records(struct kshark_context *kshark_ctx,
>   	n_cpus = tracecmd_cpus(kshark_ctx->handle);
>   
>   	for (count = 0; count < total; count++) {
> -		ts = 0;
> -		next_cpu = -1;
> -		for (cpu = 0; cpu < n_cpus; ++cpu) {
> -			if (!rec_list[cpu])
> -				continue;
> -
> -			if (!ts || rec_list[cpu]->rec->ts < ts) {
> -				ts = rec_list[cpu]->rec->ts;
> -				next_cpu = cpu;
> -			}
> -		}
> +		int next_cpu;
> +
> +		next_cpu = pick_next_cpu(rec_list, n_cpus);
>   
>   		if (next_cpu >= 0) {
>   			rec = rec_list[next_cpu]->rec;
>
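On Yordan's point above about a further loader for the NumPy interface: any new function of this type can now be built on the same two building blocks, get_records() to split the trace file into per-CPU lists and pick_next_cpu() to merge them. The sketch below is purely illustrative and not part of this series; the function name kshark_load_data_timestamps(), the flat timestamp-array output, the assumption that struct rec_list has a next member, and the omitted error handling and list cleanup are all made up for the example, while get_records(), pick_next_cpu() and tracecmd_cpus() are the ones used in libkshark.c.

/* Illustrative sketch only -- not part of the patch. */
ssize_t kshark_load_data_timestamps(struct kshark_context *kshark_ctx,
				    uint64_t **data_ts)
{
	struct rec_list **rec_list;
	size_t count, total;
	uint64_t *ts_rows;
	int n_cpus;

	total = get_records(kshark_ctx, &rec_list);

	ts_rows = calloc(total, sizeof(*ts_rows));
	if (!ts_rows)
		return -ENOMEM;

	n_cpus = tracecmd_cpus(kshark_ctx->handle);

	for (count = 0; count < total; count++) {
		int next_cpu = pick_next_cpu(rec_list, n_cpus);

		if (next_cpu < 0)
			break;

		ts_rows[count] = rec_list[next_cpu]->rec->ts;
		/*
		 * Advance this CPU's list; assumes a "next" link in
		 * struct rec_list.  Freeing the consumed node is
		 * elided here for brevity.
		 */
		rec_list[next_cpu] = rec_list[next_cpu]->next;
	}

	*data_ts = ts_rows;
	return count;
}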

Patch

diff --git a/kernel-shark-qt/src/libkshark.c b/kernel-shark-qt/src/libkshark.c
index 680949077b7f..fe8aada75149 100644
--- a/kernel-shark-qt/src/libkshark.c
+++ b/kernel-shark-qt/src/libkshark.c
@@ -566,6 +566,25 @@  static size_t get_records(struct kshark_context *kshark_ctx,
 	return -ENOMEM;
 }
 
+static int pick_next_cpu(struct rec_list **rec_list, int n_cpus)
+{
+	uint64_t ts = 0;
+	int next_cpu = -1;
+	int cpu;
+
+	for (cpu = 0; cpu < n_cpus; ++cpu) {
+		if (!rec_list[cpu])
+			continue;
+
+		if (!ts || rec_list[cpu]->rec->ts < ts) {
+			ts = rec_list[cpu]->rec->ts;
+			next_cpu = cpu;
+		}
+	}
+
+	return next_cpu;
+}
+
 /**
  * @brief Load the content of the trace data file into an array of
  *	  kshark_entries. This function provides fast loading, however the
@@ -593,9 +612,8 @@  ssize_t kshark_load_data_entries(struct kshark_context *kshark_ctx,
 	struct rec_list **rec_list;
 	struct rec_list *temp_rec;
 	struct pevent_record *rec;
-	int cpu, n_cpus, next_cpu;
 	size_t count, total = 0;
-	uint64_t ts;
+	int n_cpus;
 	int ret;
 
 	if (*data_rows)
@@ -612,17 +630,9 @@  ssize_t kshark_load_data_entries(struct kshark_context *kshark_ctx,
 	n_cpus = tracecmd_cpus(kshark_ctx->handle);
 
 	for (count = 0; count < total; count++) {
-		ts = 0;
-		next_cpu = -1;
-		for (cpu = 0; cpu < n_cpus; ++cpu) {
-			if (!rec_list[cpu])
-				continue;
-
-			if (!ts || rec_list[cpu]->rec->ts < ts) {
-				ts = rec_list[cpu]->rec->ts;
-				next_cpu = cpu;
-			}
-		}
+		int next_cpu;
+
+		next_cpu = pick_next_cpu(rec_list, n_cpus);
 
 		if (next_cpu >= 0) {
 			entry = malloc(sizeof(struct kshark_entry));
@@ -694,9 +704,8 @@  ssize_t kshark_load_data_records(struct kshark_context *kshark_ctx,
 	struct pevent_record *rec;
 	struct rec_list **rec_list;
 	struct rec_list *temp_rec;
-	int cpu, n_cpus, next_cpu;
 	size_t count, total = 0;
-	uint64_t ts;
+	int n_cpus;
 	int pid;
 
 	total = get_records(kshark_ctx, &rec_list);
@@ -710,17 +719,9 @@  ssize_t kshark_load_data_records(struct kshark_context *kshark_ctx,
 	n_cpus = tracecmd_cpus(kshark_ctx->handle);
 
 	for (count = 0; count < total; count++) {
-		ts = 0;
-		next_cpu = -1;
-		for (cpu = 0; cpu < n_cpus; ++cpu) {
-			if (!rec_list[cpu])
-				continue;
-
-			if (!ts || rec_list[cpu]->rec->ts < ts) {
-				ts = rec_list[cpu]->rec->ts;
-				next_cpu = cpu;
-			}
-		}
+		int next_cpu;
+
+		next_cpu = pick_next_cpu(rec_list, n_cpus);
 
 		if (next_cpu >= 0) {
 			rec = rec_list[next_cpu]->rec;