| Message ID | 20191206015021.31611-1-xiubli@redhat.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | ceph: add __send_request helper |
On Thu, 2019-12-05 at 20:50 -0500, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
>
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
>  fs/ceph/mds_client.c | 47 +++++++++++++++++++++++---------------------
>  1 file changed, 25 insertions(+), 22 deletions(-)
>
> [...]

Merged into ceph-client/testing.
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e47341da5a71..82dfc85b24ee 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2514,6 +2514,26 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
 	return 0;
 }
 
+/*
+ * called under mdsc->mutex
+ */
+static int __send_request(struct ceph_mds_client *mdsc,
+			  struct ceph_mds_session *session,
+			  struct ceph_mds_request *req,
+			  bool drop_cap_releases)
+{
+	int err;
+
+	err = __prepare_send_request(mdsc, req, session->s_mds,
+				     drop_cap_releases);
+	if (!err) {
+		ceph_msg_get(req->r_request);
+		ceph_con_send(&session->s_con, req->r_request);
+	}
+
+	return err;
+}
+
 /*
  * send request, or put it on the appropriate wait list.
  */
@@ -2603,11 +2623,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
 	if (req->r_request_started == 0)   /* note request start time */
 		req->r_request_started = jiffies;
 
-	err = __prepare_send_request(mdsc, req, mds, false);
-	if (!err) {
-		ceph_msg_get(req->r_request);
-		ceph_con_send(&session->s_con, req->r_request);
-	}
+	err = __send_request(mdsc, session, req, false);
 
 out_session:
 	ceph_put_mds_session(session);
@@ -3210,7 +3226,6 @@ static void handle_session(struct ceph_mds_session *session,
 	return;
 }
 
-
 /*
  * called under session->mutex.
  */
@@ -3219,18 +3234,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 {
 	struct ceph_mds_request *req, *nreq;
 	struct rb_node *p;
-	int err;
 
 	dout("replay_unsafe_requests mds%d\n", session->s_mds);
 
 	mutex_lock(&mdsc->mutex);
-	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
-		err = __prepare_send_request(mdsc, req, session->s_mds, true);
-		if (!err) {
-			ceph_msg_get(req->r_request);
-			ceph_con_send(&session->s_con, req->r_request);
-		}
-	}
+	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
+		__send_request(mdsc, session, req, true);
 
 	/*
 	 * also re-send old requests when MDS enters reconnect stage. So that MDS
@@ -3245,14 +3254,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 		if (req->r_attempts == 0)
 			continue;	/* only old requests */
 		if (req->r_session &&
-		    req->r_session->s_mds == session->s_mds) {
-			err = __prepare_send_request(mdsc, req,
-						     session->s_mds, true);
-			if (!err) {
-				ceph_msg_get(req->r_request);
-				ceph_con_send(&session->s_con, req->r_request);
-			}
-		}
+		    req->r_session->s_mds == session->s_mds)
+			__send_request(mdsc, session, req, true);
 	}
 	mutex_unlock(&mdsc->mutex);
 }
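The patch is the classic "hoist a repeated sequence into a helper" refactor: three call sites each did prepare, then take a message reference, then hand the message to the connection, and only the error from the prepare step matters to the caller. Below is a minimal userspace C sketch of the same shape; every name in it (struct request, prepare_request, request_get, con_send, send_request) is a hypothetical stand-in for illustration, not the kernel's API.

```c
#include <stdbool.h>
#include <stdio.h>

struct request {
	int refcount;          /* dropped by the transport once the send completes */
	const char *payload;
};

/* Stand-in for __prepare_send_request(): builds the wire message, may fail. */
static int prepare_request(struct request *req, int mds, bool drop_cap_releases)
{
	(void)mds;
	(void)drop_cap_releases;
	return req->payload ? 0 : -1;
}

/* Stand-in for ceph_msg_get(): the connection consumes one reference. */
static void request_get(struct request *req)
{
	req->refcount++;
}

/* Stand-in for ceph_con_send(): queues the message on the connection. */
static void con_send(int mds, struct request *req)
{
	printf("sending '%s' to mds%d (refs=%d)\n", req->payload, mds, req->refcount);
}

/*
 * The extracted helper, mirroring __send_request() in the patch: the
 * reference grab is paired with the actual send in exactly one place,
 * and every caller sees the prepare error (or ignores it, as the
 * replay paths above do).
 */
static int send_request(int mds, struct request *req, bool drop_cap_releases)
{
	int err = prepare_request(req, mds, drop_cap_releases);

	if (!err) {
		request_get(req);
		con_send(mds, req);
	}
	return err;
}

int main(void)
{
	struct request req = { .refcount = 1, .payload = "getattr" };

	/* Each former call site collapses to a single line. */
	if (send_request(0, &req, false))
		fprintf(stderr, "send failed\n");
	return 0;
}
```

The win is not just the 22 deleted lines: with the get/send pair living in one function, a future change to the send path or to the refcounting rule touches one place instead of three.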