
[2/2] scrub status: add json output format

Message ID 20250207023302.311829-3-racz.zoli@gmail.com (mailing list archive)
State New
Series btrfs-progs: scrub status: add json output format

Commit Message

Racz Zoltan Feb. 7, 2025, 2:33 a.m. UTC
This patch adds support for json formatting of the "scrub status"
command. Please note that in the info section the started-at key shows
02:00:00 1970 because I bypassed the "no stats available" check so I
could make sure those stats are correctly formatted in the output as
well.

Example usage:
1. btrfs --format json scrub status /

json output:
{
  "__header": {
    "version": "1"
  },
  "scrub-status": {
    "uuid": "1a7d1bc4-c212-42bf-b05c-73bd313d3ecd",
    "info": {
      "started-at": "Thu Jan  1 02:00:00 1970",
      "status": "finished",
      "duration": "0:00:00"
    },
    "scrub": {
      "total-bytes-to-scrub": "67184017408",
      "rate": "0"
    }
  }
}

2. btrfs --format json scrub status / -R

json output: 
{
  "__header": {
    "version": "1"
  },
  "scrub-status": {
    "uuid": "1a7d1bc4-c212-42bf-b05c-73bd313d3ecd",
    "info": {
      "started-at": "Thu Jan  1 02:00:00 1970",
      "status": "finished",
      "duration": "0:00:00"
    },
    "scrub": {
      "data-extents-scrubbed": "0",
      "tree-extents-scrubbed": "0",
      "data-bytes-scrubbed": "0",
      "tree-bytes-scrubbed": "0",
      "read-errors": "0",
      "csum-errors": "0",
      "verify-errors": "0",
      "no-csum": "0",
      "csum-discards": "0",
      "super-errors": "0",
      "malloc-errors": "0",
      "uncorrectable-errors": "0",
      "unverified-errors": "0",
      "corrected-errors": "0",
      "last-physical": "0"
    }
  }
}

 cmds/scrub.c | 251 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 195 insertions(+), 56 deletions(-)

Comments

David Sterba Feb. 11, 2025, 7:41 p.m. UTC | #1
On Fri, Feb 07, 2025 at 04:33:02AM +0200, Racz Zoltan wrote:
> This patch adds support for json formatting of the "scrub status"
> command. Please not that in the info section the started-at key in
> 02:00:00 1970 because i bypassed the "no stats available" so I can make
> sure those stats are correctly formatted in the output as well. 
> 
> Example usage:
> 1. btrfs --format json scrub status /

Thanks. The status in json is useful and it found a few things that may
be missing in the json formatting. The most obvious one is that there's
too much duplication of the code in plain vs json output. The ideal
version is that there's only a rowspec definition of all the keys and
one fmt_print for each one; it'll get formatted properly given the
selected output format.

But there are already exceptions in other code that prints both json
and plain text, due to requirements that can't be met with the fmt_
helpers while the visual output needs to be kept.


> +static const struct rowspec scrub_status_rowspec[] = {
> +	{ .key = "uuid", .fmt = "%s", .out_json = "uuid"},
> +	{ .key = "status", .fmt = "%s", .out_json = "status"},
> +	{ .key = "duration", .fmt = "%u:%s", .out_json = "duration"},

We'll need a new internal json type for duration, so the value is the
number of seconds and gets formatted automatically. The .fmt can be any
proper printf formatter but that's left for flexibility until we find a
reason to make a separate type, to avoid code repetition or differences
in how the same type of information is formatted.
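
Once such a type exists the rowspec entry could use it directly,
something like this (hypothetical "duration" .fmt name, raw seconds
passed to fmt_print()):

	{ .key = "duration", .fmt = "duration", .out_json = "duration" },
	...
	/* value in seconds, rendered e.g. as "1:02:03" by the formatter */
	fmt_print(&fctx, "duration", (u64)ss->duration);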

> +	{ .key = "started_at", .fmt = "%s", .out_json = "started-at"},

And another type for timestamp, input in seconds, formatted as some
standard human readable format that can be parsed back eventually.

> +	{ .key = "resumed_at", .fmt = "%s", .out_json = "resumed-at"},
> +	{ .key = "data_extents_scrubbed", .fmt = "%lld", .out_json = "data-extents-scrubbed"},

The keys are internal, I'd prefer to use "-" as separator.

> +	{ .key = "tree_extents_scrubbed", .fmt = "%lld", .out_json = "tree-extents-scrubbed"},
> +	{ .key = "data_bytes_scrubbed", .fmt = "%lld", .out_json = "data-bytes-scrubbed"},
> +	{ .key = "tree_bytes_scrubbed", .fmt = "%lld", .out_json = "tree-bytes-scrubbed"},
> +	{ .key = "read_errors", .fmt = "%lld", .out_json = "read-errors"},
> +	{ .key = "csum_errors", .fmt = "%lld", .out_json = "csum-errors"},
> +	{ .key = "verify_errors", .fmt = "%lld", .out_json = "verify-errors"},
> +	{ .key = "no_csum", .fmt = "%lld", .out_json = "no-csum"},
> +	{ .key = "csum_discards", .fmt = "%lld", .out_json = "csum-discards"},
> +	{ .key = "super_errors", .fmt = "%lld", .out_json = "super-errors"},
> +	{ .key = "malloc_errors", .fmt = "%lld", .out_json = "malloc-errors"},
> +	{ .key = "uncorrectable_errors", .fmt = "%lld", .out_json = "uncorrectable-errors"},
> +	{ .key = "unverified_errors", .fmt = "%lld", .out_json = "unverified-errors"},
> +	{ .key = "corrected_errors", .fmt = "%lld", .out_json = "corrected-errors"},
> +	{ .key = "last_physical", .fmt = "%lld", .out_json = "last-physical"},

All the numbers seem to be u64, so the %llu format should be used, but
it's already wrong in the current version. This would be nice to fix
first (in a separate patch).
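
For example, in print_scrub_full() the counters come from struct
btrfs_scrub_progress whose members are __u64, so the plain-text lines
should be along the lines of:

	pr_verbose(LOG_DEFAULT, "\tread_errors: %llu\n", sp->read_errors);
	pr_verbose(LOG_DEFAULT, "\tcsum_errors: %llu\n", sp->csum_errors);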

> +	{ .key = "time_left", .fmt = "%llu:%02llu:%02llu", .out_json = "time-left"},

Duration again; it may also need to be formatted with days taken into
account, which is not impossible for a whole-filesystem scrub.
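
Something along these lines would do for the text output (a sketch,
variable names illustrative):

	u64 d = sec_left / 86400;
	u64 h = (sec_left / 3600) % 24;
	u64 m = (sec_left / 60) % 60;
	u64 s = sec_left % 60;

	if (d)
		pr_verbose(LOG_DEFAULT, "Time left:        %llud %llu:%02llu:%02llu\n", d, h, m, s);
	else
		pr_verbose(LOG_DEFAULT, "Time left:        %llu:%02llu:%02llu\n", h, m, s);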

> +	{ .key = "eta", .fmt = "%s", .out_json = "eta"},

Timestamp type.

> +	{ .key = "total_bytes_to_scrub", .fmt = "%lld", .out_json = "total-bytes-to-scrub"},
> +	{ .key = "bytes_scrubbed", .fmt = "%lld", .out_json = "bytes-scrubbed"},
> +	{ .key = "rate", .fmt = "%lld", .out_json = "rate"},
> +	{ .key = "limit", .fmt = "%lld", .out_json = "limit"},
> +
> +	ROWSPEC_END
> +};

So the plan for now is to first update the formatter and then use it for
scrub status in json. Let me know if you're up for it. Adding the types
should be easy, it's in fmt_print().
Racz Zoltan Feb. 11, 2025, 11:37 p.m. UTC | #2
I just submitted a patch to the mailing list with the subject "[PATCH]
btrfs-progs: add duration format to fmt_print" which adds the format
type you suggested.
And I have a question about the timestamp format. As I saw in
format-output.c, there is a type named "date-time" which accepts
seconds as input and prints out something like "2025-02-12
14:25:30 +0200", so I think it would be usable for the timestamp
format you mentioned.
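
If "date-time" fits, the started/resumed keys could take the raw
time_t value instead of a preformatted string, roughly (sketch):

	{ .key = "started-at", .fmt = "date-time", .out_json = "started-at" },
	{ .key = "resumed-at", .fmt = "date-time", .out_json = "resumed-at" },
	...
	fmt_print(&fctx, "started-at", ss->t_start);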


Patch

diff --git a/cmds/scrub.c b/cmds/scrub.c
index 3507c9d8..31b965fc 100644
--- a/cmds/scrub.c
+++ b/cmds/scrub.c
@@ -53,6 +53,7 @@ 
 #include "common/sysfs-utils.h"
 #include "common/string-table.h"
 #include "common/string-utils.h"
+#include "common/format-output.h"
 #include "common/help.h"
 #include "cmds/commands.h"
 
@@ -123,23 +124,78 @@  struct scrub_fs_stat {
 	int i;
 };
 
+struct format_ctx fctx;
+
+static const struct rowspec scrub_status_rowspec[] = {
+	{ .key = "uuid", .fmt = "%s", .out_json = "uuid"},
+	{ .key = "status", .fmt = "%s", .out_json = "status"},
+	{ .key = "duration", .fmt = "%u:%s", .out_json = "duration"},
+	{ .key = "started_at", .fmt = "%s", .out_json = "started-at"},
+	{ .key = "resumed_at", .fmt = "%s", .out_json = "resumed-at"},
+	{ .key = "data_extents_scrubbed", .fmt = "%lld", .out_json = "data-extents-scrubbed"},
+	{ .key = "tree_extents_scrubbed", .fmt = "%lld", .out_json = "tree-extents-scrubbed"},
+	{ .key = "data_bytes_scrubbed", .fmt = "%lld", .out_json = "data-bytes-scrubbed"},
+	{ .key = "tree_bytes_scrubbed", .fmt = "%lld", .out_json = "tree-bytes-scrubbed"},
+	{ .key = "read_errors", .fmt = "%lld", .out_json = "read-errors"},
+	{ .key = "csum_errors", .fmt = "%lld", .out_json = "csum-errors"},
+	{ .key = "verify_errors", .fmt = "%lld", .out_json = "verify-errors"},
+	{ .key = "no_csum", .fmt = "%lld", .out_json = "no-csum"},
+	{ .key = "csum_discards", .fmt = "%lld", .out_json = "csum-discards"},
+	{ .key = "super_errors", .fmt = "%lld", .out_json = "super-errors"},
+	{ .key = "malloc_errors", .fmt = "%lld", .out_json = "malloc-errors"},
+	{ .key = "uncorrectable_errors", .fmt = "%lld", .out_json = "uncorrectable-errors"},
+	{ .key = "unverified_errors", .fmt = "%lld", .out_json = "unverified-errors"},
+	{ .key = "corrected_errors", .fmt = "%lld", .out_json = "corrected-errors"},
+	{ .key = "last_physical", .fmt = "%lld", .out_json = "last-physical"},
+	{ .key = "time_left", .fmt = "%llu:%02llu:%02llu", .out_json = "time-left"},
+	{ .key = "eta", .fmt = "%s", .out_json = "eta"},
+	{ .key = "total_bytes_to_scrub", .fmt = "%lld", .out_json = "total-bytes-to-scrub"},
+	{ .key = "bytes_scrubbed", .fmt = "%lld", .out_json = "bytes-scrubbed"},
+	{ .key = "rate", .fmt = "%lld", .out_json = "rate"},
+	{ .key = "limit", .fmt = "%lld", .out_json = "limit"},
+
+	ROWSPEC_END
+};
+
 static void print_scrub_full(struct btrfs_scrub_progress *sp)
 {
-	pr_verbose(LOG_DEFAULT, "\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
-	pr_verbose(LOG_DEFAULT, "\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
-	pr_verbose(LOG_DEFAULT, "\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
-	pr_verbose(LOG_DEFAULT, "\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
-	pr_verbose(LOG_DEFAULT, "\tread_errors: %lld\n", sp->read_errors);
-	pr_verbose(LOG_DEFAULT, "\tcsum_errors: %lld\n", sp->csum_errors);
-	pr_verbose(LOG_DEFAULT, "\tverify_errors: %lld\n", sp->verify_errors);
-	pr_verbose(LOG_DEFAULT, "\tno_csum: %lld\n", sp->no_csum);
-	pr_verbose(LOG_DEFAULT, "\tcsum_discards: %lld\n", sp->csum_discards);
-	pr_verbose(LOG_DEFAULT, "\tsuper_errors: %lld\n", sp->super_errors);
-	pr_verbose(LOG_DEFAULT, "\tmalloc_errors: %lld\n", sp->malloc_errors);
-	pr_verbose(LOG_DEFAULT, "\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
-	pr_verbose(LOG_DEFAULT, "\tunverified_errors: %lld\n", sp->unverified_errors);
-	pr_verbose(LOG_DEFAULT, "\tcorrected_errors: %lld\n", sp->corrected_errors);
-	pr_verbose(LOG_DEFAULT, "\tlast_physical: %lld\n", sp->last_physical);
+	if (bconf.output_format == CMD_FORMAT_JSON) {
+		fmt_print_start_group(&fctx, "scrub", JSON_TYPE_MAP);
+
+		fmt_print(&fctx, "data_extents_scrubbed", sp->data_extents_scrubbed);
+		fmt_print(&fctx, "tree_extents_scrubbed", sp->tree_extents_scrubbed);
+		fmt_print(&fctx, "data_bytes_scrubbed", sp->data_bytes_scrubbed);
+		fmt_print(&fctx, "tree_bytes_scrubbed", sp->tree_bytes_scrubbed);
+		fmt_print(&fctx, "read_errors", sp->read_errors);
+		fmt_print(&fctx, "csum_errors", sp->csum_errors);
+		fmt_print(&fctx, "verify_errors", sp->verify_errors);
+		fmt_print(&fctx, "no_csum", sp->no_csum);
+		fmt_print(&fctx, "csum_discards", sp->csum_discards);
+		fmt_print(&fctx, "super_errors", sp->super_errors);
+		fmt_print(&fctx, "malloc_errors", sp->malloc_errors);
+		fmt_print(&fctx, "uncorrectable_errors", sp->uncorrectable_errors);
+		fmt_print(&fctx, "unverified_errors", sp->unverified_errors);
+		fmt_print(&fctx, "corrected_errors", sp->corrected_errors);
+		fmt_print(&fctx, "last_physical", sp->last_physical);
+
+		fmt_print_end_group(&fctx, "scrub");
+	} else {
+		pr_verbose(LOG_DEFAULT, "\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
+		pr_verbose(LOG_DEFAULT, "\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
+		pr_verbose(LOG_DEFAULT, "\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
+		pr_verbose(LOG_DEFAULT, "\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
+		pr_verbose(LOG_DEFAULT, "\tread_errors: %lld\n", sp->read_errors);
+		pr_verbose(LOG_DEFAULT, "\tcsum_errors: %lld\n", sp->csum_errors);
+		pr_verbose(LOG_DEFAULT, "\tverify_errors: %lld\n", sp->verify_errors);
+		pr_verbose(LOG_DEFAULT, "\tno_csum: %lld\n", sp->no_csum);
+		pr_verbose(LOG_DEFAULT, "\tcsum_discards: %lld\n", sp->csum_discards);
+		pr_verbose(LOG_DEFAULT, "\tsuper_errors: %lld\n", sp->super_errors);
+		pr_verbose(LOG_DEFAULT, "\tmalloc_errors: %lld\n", sp->malloc_errors);
+		pr_verbose(LOG_DEFAULT, "\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
+		pr_verbose(LOG_DEFAULT, "\tunverified_errors: %lld\n", sp->unverified_errors);
+		pr_verbose(LOG_DEFAULT, "\tcorrected_errors: %lld\n", sp->corrected_errors);
+		pr_verbose(LOG_DEFAULT, "\tlast_physical: %lld\n", sp->last_physical);
+	}
 }
 
 #define PRINT_SCRUB_ERROR(test, desc) do {	\
@@ -157,6 +213,8 @@  static void print_scrub_summary(struct btrfs_scrub_progress *p, struct scrub_sta
 	u64 sec_left = 0;
 	time_t sec_eta;
 
+	const bool json = (bconf.output_format == CMD_FORMAT_JSON);
+
 	bytes_scrubbed = p->data_bytes_scrubbed + p->tree_bytes_scrubbed;
 	/*
 	 * If duration is zero seconds (rounded down), then the Rate metric
@@ -177,8 +235,13 @@  static void print_scrub_summary(struct btrfs_scrub_progress *p, struct scrub_sta
 
 	err_cnt2 = p->corrected_errors + p->uncorrectable_errors;
 
+	if (json) {
+		fmt_print_start_group(&fctx, "scrub", JSON_TYPE_MAP);
+	}
+
 	if (p->malloc_errors)
-		pr_verbose(LOG_DEFAULT, "*** WARNING: memory allocation failed while scrubbing. "
+		if (!json) 
+			pr_verbose(LOG_DEFAULT, "*** WARNING: memory allocation failed while scrubbing. "
 		       "results may be inaccurate\n");
 
 	if (s->in_progress) {
@@ -191,44 +254,81 @@  static void print_scrub_summary(struct btrfs_scrub_progress *p, struct scrub_sta
 		t[sizeof(t) - 1] = '\0';
 		strftime(t, sizeof(t), "%c", &tm);
 
-		pr_verbose(LOG_DEFAULT, "Time left:        %llu:%02llu:%02llu\n",
-			sec_left / 3600, (sec_left / 60) % 60, sec_left % 60);
-		pr_verbose(LOG_DEFAULT, "ETA:              %s\n", t);
-		pr_verbose(LOG_DEFAULT, "Total to scrub:   %s\n",
-			pretty_size_mode(bytes_total, unit_mode));
-		pr_verbose(LOG_DEFAULT, "Bytes scrubbed:   %s  (%.2f%%)\n",
-			pretty_size_mode(bytes_scrubbed, unit_mode),
-			100.0 * bytes_scrubbed / bytes_total);
+		if (json) {
+			fmt_print(&fctx, "time_left", sec_left / 3600, (sec_left / 60) % 60, sec_left % 60);
+			fmt_print(&fctx, "eta", t);
+			fmt_print(&fctx, "total_bytes_to_scrub", bytes_total);
+			fmt_print(&fctx, "bytes_scrubbed", bytes_scrubbed);
+		}
+		else
+		{
+			pr_verbose(LOG_DEFAULT, "Time left:        %llu:%02llu:%02llu\n",
+				sec_left / 3600, (sec_left / 60) % 60, sec_left % 60);
+			pr_verbose(LOG_DEFAULT, "ETA:              %s\n", t);
+			pr_verbose(LOG_DEFAULT, "Total to scrub:   %s\n",
+				pretty_size_mode(bytes_total, unit_mode));
+			pr_verbose(LOG_DEFAULT, "Bytes scrubbed:   %s  (%.2f%%)\n",
+				pretty_size_mode(bytes_scrubbed, unit_mode),
+				100.0 * bytes_scrubbed / bytes_total);
+
+		}
 	} else {
-		pr_verbose(LOG_DEFAULT, "Total to scrub:   %s\n",
-			pretty_size_mode(bytes_total, unit_mode));
+		if (json) 
+			fmt_print(&fctx, "total_bytes_to_scrub", bytes_total);
+		else
+			pr_verbose(LOG_DEFAULT, "Total to scrub:   %s\n",
+				pretty_size_mode(bytes_total, unit_mode));
 	}
 	/*
 	 * Rate and size units are disproportionate so they are affected only
 	 * by --raw, otherwise it's human readable
 	 */
-	
-	pr_verbose(LOG_DEFAULT, "Rate:             %s/s",
-		pretty_size_mode(bytes_per_sec, unit_mode));
-	if (limit > 1)
-		pr_verbose(LOG_DEFAULT, " (limit %s/s)",
-			pretty_size_mode(limit, unit_mode));
-	else if (limit == 1)
-		pr_verbose(LOG_DEFAULT, " (some device limits set)");
-	pr_verbose(LOG_DEFAULT, "\n");
-
-	pr_verbose(LOG_DEFAULT, "Error summary:   ");
-	if (err_cnt || err_cnt2) {
-		PRINT_SCRUB_ERROR(p->read_errors, "read");
-		PRINT_SCRUB_ERROR(p->super_errors, "super");
-		PRINT_SCRUB_ERROR(p->verify_errors, "verify");
-		PRINT_SCRUB_ERROR(p->csum_errors, "csum");
-		pr_verbose(LOG_DEFAULT, "\n");
-		pr_verbose(LOG_DEFAULT, "  Corrected:      %llu\n", p->corrected_errors);
-		pr_verbose(LOG_DEFAULT, "  Uncorrectable:  %llu\n", p->uncorrectable_errors);
-		pr_verbose(LOG_DEFAULT, "  Unverified:     %llu\n", p->unverified_errors);
+	if (json) {
+		fmt_print(&fctx, "rate", bytes_per_sec);
+		if (limit > 1)
+			fmt_print(&fctx, "limit", limit);
 	} else {
-		pr_verbose(LOG_DEFAULT, " no errors found\n");
+		pr_verbose(LOG_DEFAULT, "Rate:             %s/s",
+			pretty_size_mode(bytes_per_sec, unit_mode));
+		if (limit > 1)
+			pr_verbose(LOG_DEFAULT, " (limit %s/s)",
+				pretty_size_mode(limit, unit_mode));
+		else if (limit == 1)
+			pr_verbose(LOG_DEFAULT, " (some device limits set)");
+		pr_verbose(LOG_DEFAULT, "\n");
+	}
+
+	if (json) {
+		if (err_cnt || err_cnt2) {
+			fmt_print_start_group(&fctx, "error-summary", JSON_TYPE_MAP);
+			fmt_print(&fctx, "read_errors", p->read_errors);
+			fmt_print(&fctx, "super_errors", p->super_errors);
+			fmt_print(&fctx, "verify_errors", p->verify_errors);
+			fmt_print(&fctx, "csum_errors", p->csum_errors);
+			fmt_print(&fctx, "corrected_errors", p->corrected_errors);
+			fmt_print(&fctx, "uncorrectable_errors", p->uncorrectable_errors);
+			fmt_print(&fctx, "unverified_errors", p->unverified_errors);
+			fmt_print_end_group(&fctx, "error-summary");
+		}
+	}
+	else {
+		pr_verbose(LOG_DEFAULT, "Error summary:   ");
+		if (err_cnt || err_cnt2) {
+			PRINT_SCRUB_ERROR(p->read_errors, "read");
+			PRINT_SCRUB_ERROR(p->super_errors, "super");
+			PRINT_SCRUB_ERROR(p->verify_errors, "verify");
+			PRINT_SCRUB_ERROR(p->csum_errors, "csum");
+			pr_verbose(LOG_DEFAULT, "\n");
+			pr_verbose(LOG_DEFAULT, "  Corrected:      %llu\n", p->corrected_errors);
+			pr_verbose(LOG_DEFAULT, "  Uncorrectable:  %llu\n", p->uncorrectable_errors);
+			pr_verbose(LOG_DEFAULT, "  Unverified:     %llu\n", p->unverified_errors);
+		} else {
+			pr_verbose(LOG_DEFAULT, " no errors found\n");
+		}
+	}
+
+	if (json) {
+		fmt_print_end_group(&fctx, "scrub");
 	}
 }
 
@@ -298,32 +398,57 @@  static void _print_scrub_ss(struct scrub_stats *ss)
 	struct tm tm;
 	time_t seconds;
 	unsigned hours;
+	char *status;
 
-	if (!ss || !ss->t_start) {
+	const bool json_output = (bconf.output_format == CMD_FORMAT_JSON);
+
+	if ((!ss || !ss->t_start) && !json_output) {
 		pr_verbose(LOG_DEFAULT, "\tno stats available\n");
 		return;
 	}
+
+	if (json_output)
+		fmt_print_start_group(&fctx, "info", JSON_TYPE_MAP);
+
 	if (ss->t_resumed) {
 		localtime_r(&ss->t_resumed, &tm);
 		strftime(t, sizeof(t), "%c", &tm);
 		t[sizeof(t) - 1] = '\0';
-		pr_verbose(LOG_DEFAULT, "Scrub resumed:    %s\n", t);
+
+		if (json_output)
+			fmt_print(&fctx, "resumed_at", t);
+		else 
+			pr_verbose(LOG_DEFAULT, "Scrub resumed:    %s\n", t);
 	} else {
 		localtime_r(&ss->t_start, &tm);
 		strftime(t, sizeof(t), "%c", &tm);
 		t[sizeof(t) - 1] = '\0';
-		pr_verbose(LOG_DEFAULT, "Scrub started:    %s\n", t);
+
+		if (json_output)
+			fmt_print(&fctx, "started_at", t);
+		else
+			pr_verbose(LOG_DEFAULT, "Scrub started:    %s\n", t);
 	}
 
 	seconds = ss->duration;
 	hours = ss->duration / (60 * 60);
 	gmtime_r(&seconds, &tm);
 	strftime(t, sizeof(t), "%M:%S", &tm);
-	pr_verbose(LOG_DEFAULT, "Status:           %s\n",
-			(ss->in_progress ? "running" :
+
+	status = (ss->in_progress ? "running" :
 			 (ss->canceled ? "aborted" :
-			  (ss->finished ? "finished" : "interrupted"))));
-	pr_verbose(LOG_DEFAULT, "Duration:         %u:%s\n", hours, t);
+			  (ss->finished ? "finished" : "interrupted")));
+	
+
+	if (json_output) {
+		fmt_print(&fctx, "status", status);
+		fmt_print(&fctx, "duration", hours, t);
+		fmt_print_end_group(&fctx, "info");
+	} else {
+		pr_verbose(LOG_DEFAULT, "Status:           %s\n", status);
+		pr_verbose(LOG_DEFAULT, "Duration:         %u:%s\n", hours, t);
+	}
+
 }
 
 static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
@@ -1818,6 +1943,8 @@  static int cmd_scrub_status(const struct cmd_struct *cmd, int argc, char **argv)
 	int fdres = -1;
 	int err = 0;
 
+	const bool json_output = (bconf.output_format == CMD_FORMAT_JSON);
+
 	unit_mode = get_unit_mode_from_arg(&argc, argv, 0);
 
 	optind = 0;
@@ -1896,7 +2023,13 @@  static int cmd_scrub_status(const struct cmd_struct *cmd, int argc, char **argv)
 	}
 	in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);
 
-	pr_verbose(LOG_DEFAULT, "UUID:             %s\n", fsid);
+
+	if (json_output) {
+		fmt_start(&fctx, scrub_status_rowspec, 1, 0);
+		fmt_print_start_group(&fctx, "scrub-status", JSON_TYPE_MAP);
+		fmt_print(&fctx, "uuid", fsid);
+	} else 
+		pr_verbose(LOG_DEFAULT, "UUID:             %s\n", fsid);
 
 	if (do_stats_per_dev) {
 		for (i = 0; i < fi_args.num_devices; ++i) {
@@ -1943,10 +2076,16 @@  static int cmd_scrub_status(const struct cmd_struct *cmd, int argc, char **argv)
 			/* This is still slightly off for RAID56 */
 			total_bytes_used += sp->used_bytes * factor;
 		}
+
 		print_fs_stat(&fs_stat, print_raw, total_bytes_used,
 			      fi_args.num_devices, limit);
 	}
 
+	if (json_output) {
+		fmt_print_end_group(&fctx, "scrub-status");
+		fmt_end(&fctx);
+	}
+
 out:
 	free_history(past_scrubs);
 	free(di_args);
@@ -1957,7 +2096,7 @@  out:
 
 	return !!err;
 }
-static DEFINE_SIMPLE_COMMAND(scrub_status, "status");
+static DEFINE_COMMAND_WITH_FLAGS(scrub_status, "status", CMD_FORMAT_JSON);
 
 static const char * const cmd_scrub_limit_usage[] = {
 	"btrfs scrub limit [options] <path>",