@@ -651,6 +651,9 @@ static int ceph_sync_writepages(struct ceph_fs_client *fsc,
 	if (!rc)
 		rc = ceph_osdc_wait_request(osdc, req);
 
+	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_stamp,
+				  req->r_end_stamp, rc);
+
 	ceph_osdc_put_request(req);
 	if (rc == 0)
 		rc = len;
@@ -802,6 +805,9 @@ static void writepages_finish(struct ceph_osd_request *req)
 		ceph_clear_error_write(ci);
 	}
 
+	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_stamp,
+				  req->r_end_stamp, rc);
+
 	/*
 	 * We lost the cache cap, need to truncate the page before
 	 * it is unlocked, otherwise we'd truncate it later in the
@@ -1860,6 +1866,10 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!err)
 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+	ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_stamp,
+				  req->r_end_stamp, err);
+
 out_put:
 	ceph_osdc_put_request(req);
 	if (err == -ECANCELED)
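
Each of the addr.c call sites above follows the same pattern: the OSD client stamps the request when it is submitted (r_start_stamp) and when the reply is handled (r_end_stamp), and the filesystem feeds both stamps plus the result code into the metric once the request is finished, whether that happens right after ceph_osdc_wait_request() or inside the writepages completion callback. The standalone sketch below is illustration only, not part of the patch; all names and the plain-integer "stamps" are hypothetical, and the error filtering is covered in the metric.h sketch further below.

#include <stdio.h>

struct client_metric {
	long long total_writes;
	long long write_latency_sum;	/* in ticks, standing in for jiffies */
};

/* Same role as ceph_update_write_latency(): count the request and
 * accumulate its elapsed ticks. */
static void update_write_latency(struct client_metric *m,
				 unsigned long start, unsigned long end, int rc)
{
	if (rc)
		return;
	m->total_writes++;
	m->write_latency_sum += end - start;
}

/* Synchronous path: submit, wait for the reply, then account. */
static void sync_write(struct client_metric *m)
{
	unsigned long start = 100, end = 112;	/* stamps set by the transport */
	int rc = 0;				/* result of waiting */

	update_write_latency(m, start, end, rc);
}

/* Asynchronous path: the completion callback does the same accounting. */
static void write_done(struct client_metric *m, unsigned long start,
		       unsigned long end, int rc)
{
	update_write_latency(m, start, end, rc);
}

int main(void)
{
	struct client_metric m = { 0, 0 };

	sync_write(&m);
	write_done(&m, 200, 230, 0);
	printf("writes=%lld latency_sum=%lld\n",
	       m.total_writes, m.write_latency_sum);
	return 0;
}
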
@@ -140,6 +140,12 @@ static int metric_show(struct seq_file *s, void *p)
 	avg = total ? sum / total : 0;
 	seq_printf(s, "%-14s%-12lld%-16lld%lld\n", "read", total, sum, avg);
 
+	total = percpu_counter_sum(&mdsc->metric.total_writes);
+	sum = percpu_counter_sum(&mdsc->metric.write_latency_sum);
+	sum = jiffies_to_usecs(sum);
+	avg = total ? sum / total : 0;
+	seq_printf(s, "%-14s%-12lld%-16lld%lld\n", "write", total, sum, avg);
+
 	seq_printf(s, "\n");
 	seq_printf(s, "item          total           miss            hit\n");
 	seq_printf(s, "-------------------------------------------------\n");
@@ -1058,9 +1058,14 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	     inode, rc, osd_data->bvec_pos.iter.bi_size);
 
 	/* r_start_stamp == 0 means the request was not submitted */
-	if (req->r_start_stamp && !aio_req->write)
-		ceph_update_read_latency(metric, req->r_start_stamp,
-					 req->r_end_stamp, rc);
+	if (req->r_start_stamp) {
+		if (aio_req->write)
+			ceph_update_write_latency(metric, req->r_start_stamp,
+						  req->r_end_stamp, rc);
+		else
+			ceph_update_read_latency(metric, req->r_start_stamp,
+						 req->r_end_stamp, rc);
+	}
 
 	if (rc == -EOLDSNAPC) {
 		struct ceph_aio_work *aio_work;
@@ -1307,7 +1312,10 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		if (!ret)
 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 
-		if (!write)
+		if (write)
+			ceph_update_write_latency(metric, req->r_start_stamp,
+						  req->r_end_stamp, ret);
+		else
 			ceph_update_read_latency(metric, req->r_start_stamp,
 						 req->r_end_stamp, ret);
 
@@ -1482,6 +1490,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 		if (!ret)
 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 
+		ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_stamp,
+					  req->r_end_stamp, ret);
 out:
 		ceph_osdc_put_request(req);
 		if (ret != 0) {
@@ -4353,8 +4353,20 @@ static int ceph_mdsc_metric_init(struct ceph_client_metric *metric)
 	if (ret)
 		goto err_read_latency_sum;
 
+	ret = percpu_counter_init(&metric->total_writes, 0, GFP_KERNEL);
+	if (ret)
+		goto err_total_writes;
+
+	ret = percpu_counter_init(&metric->write_latency_sum, 0, GFP_KERNEL);
+	if (ret)
+		goto err_write_latency_sum;
+
 	return 0;
 
+err_write_latency_sum:
+	percpu_counter_destroy(&metric->total_writes);
+err_total_writes:
+	percpu_counter_destroy(&metric->read_latency_sum);
 err_read_latency_sum:
 	percpu_counter_destroy(&metric->total_reads);
 err_total_reads:
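
The init path extends the existing goto-based unwind: each newly added percpu counter gets its own error label, and a failure tears down everything initialized before it in reverse order, so ceph_mdsc_destroy() never sees a half-initialized metric block. The sketch below shows the same unwind idiom; it is illustration only, and counter_init()/counter_destroy() are stubs, not the percpu_counter API.

#include <stdio.h>

/* Stub standing in for percpu_counter_init(); fails when told to. */
static int counter_init(const char *name, int fail)
{
	printf("init %s%s\n", name, fail ? " -> failed" : "");
	return fail ? -1 : 0;
}

static void counter_destroy(const char *name)
{
	printf("destroy %s\n", name);
}

static int metric_init(int fail_on_writes)
{
	int ret;

	ret = counter_init("total_reads", 0);
	if (ret)
		goto err_total_reads;

	ret = counter_init("read_latency_sum", 0);
	if (ret)
		goto err_read_latency_sum;

	ret = counter_init("total_writes", fail_on_writes);
	if (ret)
		goto err_total_writes;

	ret = counter_init("write_latency_sum", 0);
	if (ret)
		goto err_write_latency_sum;

	return 0;

err_write_latency_sum:
	counter_destroy("total_writes");
err_total_writes:
	counter_destroy("read_latency_sum");
err_read_latency_sum:
	counter_destroy("total_reads");
err_total_reads:
	return ret;
}

int main(void)
{
	/* Simulate the total_writes init failing: everything initialized
	 * before it is destroyed in reverse order. */
	return metric_init(1) ? 1 : 0;
}
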
@@ -4706,6 +4718,8 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
 
 	ceph_mdsc_stop(mdsc);
 
+	percpu_counter_destroy(&mdsc->metric.write_latency_sum);
+	percpu_counter_destroy(&mdsc->metric.total_writes);
 	percpu_counter_destroy(&mdsc->metric.read_latency_sum);
 	percpu_counter_destroy(&mdsc->metric.total_reads);
 	percpu_counter_destroy(&mdsc->metric.i_caps_mis);
@@ -13,6 +13,9 @@ struct ceph_client_metric {
 
 	struct percpu_counter total_reads;
 	struct percpu_counter read_latency_sum;
+
+	struct percpu_counter total_writes;
+	struct percpu_counter write_latency_sum;
 };
 
 static inline void ceph_update_cap_hit(struct ceph_client_metric *m)
@@ -36,4 +39,16 @@ static inline void ceph_update_read_latency(struct ceph_client_metric *m,
 	percpu_counter_inc(&m->total_reads);
 	percpu_counter_add(&m->read_latency_sum, r_end - r_start);
 }
+
+static inline void ceph_update_write_latency(struct ceph_client_metric *m,
+					     unsigned long r_start,
+					     unsigned long r_end,
+					     int rc)
+{
+	if (rc && rc != -ETIMEDOUT)
+		return;
+
+	percpu_counter_inc(&m->total_writes);
+	percpu_counter_add(&m->write_latency_sum, r_end - r_start);
+}
 #endif /* _FS_CEPH_MDS_METRIC_H */
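
The helper mirrors ceph_update_read_latency() directly above it: results other than success and -ETIMEDOUT are ignored, and everything else bumps total_writes and adds the elapsed jiffies to write_latency_sum. A standalone sketch of those semantics follows, with plain 64-bit counters standing in for the percpu counters; it is illustration only, not part of the patch.

#include <stdio.h>
#include <errno.h>

struct client_metric {
	long long total_writes;
	long long write_latency_sum;	/* accumulated in jiffies */
};

static void update_write_latency(struct client_metric *m,
				 unsigned long r_start, unsigned long r_end,
				 int rc)
{
	/* Only successful or timed-out requests are accounted, as above. */
	if (rc && rc != -ETIMEDOUT)
		return;

	m->total_writes++;
	m->write_latency_sum += r_end - r_start;
}

int main(void)
{
	struct client_metric m = { 0, 0 };

	update_write_latency(&m, 1000, 1010, 0);		/* counted */
	update_write_latency(&m, 2000, 2025, -ETIMEDOUT);	/* counted */
	update_write_latency(&m, 3000, 3003, -EIO);		/* skipped */

	/* Prints total_writes=2 write_latency_sum=35. */
	printf("total_writes=%lld write_latency_sum=%lld\n",
	       m.total_writes, m.write_latency_sum);
	return 0;
}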