
[linux-cifs-client,2/4] cifs: iterate over cifs_oplock_list using list_for_each_entry_safe

Message ID 1250511368-9812-3-git-send-email-jlayton@redhat.com (mailing list archive)
State New, archived

Commit Message

Jeff Layton Aug. 17, 2009, 12:16 p.m. UTC
Removing an entry from a list while you're iterating over it can be
problematic and racy, so use the _safe version of the list iteration
routine in DeleteTconOplockQEntries.

Also restructure the oplock_thread loop to make sure that if a
kthread_stop races in just as the thread goes to sleep, then it won't
just sit there for 39s.

Finally, remove DeleteOplockQEntry(). It's only called from one place
and we can avoid a function call this way.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
---
 fs/cifs/cifsfs.c    |   37 +++++++++++++++++++++++--------------
 fs/cifs/cifsproto.h |    1 -
 fs/cifs/transport.c |   23 +++++------------------
 3 files changed, 28 insertions(+), 33 deletions(-)
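
For reference, the reordered sleep at the bottom of the loop follows the
usual kthread idiom: set the task state before checking
kthread_should_stop(), so that a wake-up from kthread_stop() landing just
after the check still leaves the task runnable and schedule_timeout()
returns immediately. A minimal sketch of that ordering (kernel-style, not
taken from this patch; do_pending_work() is a hypothetical stand-in):

	while (1) {
		do_pending_work();	/* stand-in for draining the queue */

		set_current_state(TASK_INTERRUPTIBLE);	/* 1: mark sleepy first */
		if (kthread_should_stop()) {		/* 2: then check for stop */
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule_timeout(39*HZ);	/* 3: a wake_up() arriving after
						   step 1 makes this return at
						   once instead of after 39s */
	}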

Comments

Shirish Pargaonkar Aug. 17, 2009, 3:09 p.m. UTC | #1
On Mon, Aug 17, 2009 at 7:16 AM, Jeff Layton <jlayton@redhat.com> wrote:
> Removing an entry from a list while you're iterating over it can be
> problematic and racy, so use the _safe version of the list iteration
> routine in DeleteTconOplockQEntries.
>
> Also restructure the oplock_thread loop to make sure that if a
> kthread_stop races in just as the thread goes to sleep, then it won't
> just sit there for 39s.
>
> Finally, remove DeleteOplockQEntry(). It's only called from one place
> and we can avoid a function call this way.
>
> Signed-off-by: Jeff Layton <jlayton@redhat.com>
> ---
>  fs/cifs/cifsfs.c    |   37 +++++++++++++++++++++++--------------
>  fs/cifs/cifsproto.h |    1 -
>  fs/cifs/transport.c |   23 +++++------------------
>  3 files changed, 28 insertions(+), 33 deletions(-)
>
> diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
> index 0d4e0da..ab4b373 100644
> --- a/fs/cifs/cifsfs.c
> +++ b/fs/cifs/cifsfs.c
> @@ -976,7 +976,7 @@ cifs_destroy_mids(void)
>  static int cifs_oplock_thread(void *dummyarg)
>  {
>        struct oplock_q_entry *oplock_item;
> -       struct cifsTconInfo *pTcon;
> +       struct cifsTconInfo *tcon;
>        struct inode *inode;
>        __u16  netfid;
>        int rc, waitrc = 0;
> @@ -986,20 +986,24 @@ static int cifs_oplock_thread(void *dummyarg)
>                if (try_to_freeze())
>                        continue;
>
> +               /*
> +                * can't reasonably use list_for_each macros here. It's
> +                * possible that another thread could come along and remove
> +                * some of the entries while the lock is released. It's fine
> +                * though since we're just popping one off the head on each
> +                * iteration anyway.
> +                */
>                mutex_lock(&cifs_oplock_mutex);
> -               if (list_empty(&cifs_oplock_list)) {
> -                       mutex_unlock(&cifs_oplock_mutex);
> -                       set_current_state(TASK_INTERRUPTIBLE);
> -                       schedule_timeout(39*HZ);
> -               } else {
> -                       oplock_item = list_entry(cifs_oplock_list.next,
> -                                               struct oplock_q_entry, qhead);
> +               while(!list_empty(&cifs_oplock_list)) {
>                        cFYI(1, ("found oplock item to write out"));
> -                       pTcon = oplock_item->tcon;
> +                       oplock_item = list_entry(cifs_oplock_list.next,
> +                                                struct oplock_q_entry, qhead);
> +                       tcon = oplock_item->tcon;
>                        inode = oplock_item->pinode;
>                        netfid = oplock_item->netfid;
> +                       list_del(&oplock_item->qhead);
> +                       kmem_cache_free(cifs_oplock_cachep, oplock_item);
>                        mutex_unlock(&cifs_oplock_mutex);

It's not very clear with the changes and indentation, but we do take the
mutex lock within the new while loop, right?

> -                       DeleteOplockQEntry(oplock_item);
>                        /* can not grab inode sem here since it would
>                                deadlock when oplock received on delete
>                                since vfs_unlink holds the i_mutex across
> @@ -1034,16 +1038,21 @@ static int cifs_oplock_thread(void *dummyarg)
>                                not bother sending an oplock release if session
>                                to server still is disconnected since oplock
>                                already released by the server in that case */
> -                       if (!pTcon->need_reconnect) {
> -                               rc = CIFSSMBLock(0, pTcon, netfid,
> +                       if (!tcon->need_reconnect) {
> +                               rc = CIFSSMBLock(0, tcon, netfid,
>                                                0 /* len */ , 0 /* offset */, 0,
>                                                0, LOCKING_ANDX_OPLOCK_RELEASE,
>                                                false /* wait flag */);
>                                cFYI(1, ("Oplock release rc = %d", rc));
>                        }
> -                       set_current_state(TASK_INTERRUPTIBLE);
> -                       schedule_timeout(1);  /* yield in case q were corrupt */
> +                       mutex_lock(&cifs_oplock_mutex);
>                }
> +               mutex_unlock(&cifs_oplock_mutex);
> +               set_current_state(TASK_INTERRUPTIBLE);
> +               if (kthread_should_stop())
> +                       break;
> +               /* FIXME: why 39s here? Turn this into a #define constant? */
> +               schedule_timeout(39*HZ);
>        } while (!kthread_should_stop());
>
>        return 0;
> diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
> index da8fbf5..b7554a7 100644
> --- a/fs/cifs/cifsproto.h
> +++ b/fs/cifs/cifsproto.h
> @@ -88,7 +88,6 @@ extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
>  extern __u16 GetNextMid(struct TCP_Server_Info *server);
>  extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
>                                                 struct cifsTconInfo *);
> -extern void DeleteOplockQEntry(struct oplock_q_entry *);
>  extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
>  extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
>  extern u64 cifs_UnixTimeToNT(struct timespec);
> diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
> index 92e1538..59f0e95 100644
> --- a/fs/cifs/transport.c
> +++ b/fs/cifs/transport.c
> @@ -126,28 +126,15 @@ AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
>        return temp;
>  }
>
> -void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
> -{
> -       mutex_lock(&cifs_oplock_mutex);
> -    /* should we check if list empty first? */
> -       list_del(&oplockEntry->qhead);
> -       mutex_unlock(&cifs_oplock_mutex);
> -       kmem_cache_free(cifs_oplock_cachep, oplockEntry);
> -}
> -
> -
>  void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
>  {
> -       struct oplock_q_entry *temp;
> -
> -       if (tcon == NULL)
> -               return;
> +       struct oplock_q_entry *entry, *next;
>
>        mutex_lock(&cifs_oplock_mutex);
> -       list_for_each_entry(temp, &cifs_oplock_list, qhead) {
> -               if ((temp->tcon) && (temp->tcon == tcon)) {
> -                       list_del(&temp->qhead);
> -                       kmem_cache_free(cifs_oplock_cachep, temp);
> +       list_for_each_entry_safe(entry, next, &cifs_oplock_list, qhead) {

Just a nitpick: since we do not use next anywhere, maybe use NULL instead?

> +               if (entry->tcon && entry->tcon == tcon) {
> +                       list_del(&entry->qhead);
> +                       kmem_cache_free(cifs_oplock_cachep, entry);
>                }
>        }
>        mutex_unlock(&cifs_oplock_mutex);
> --
> 1.6.0.6
>
Jeff Layton Aug. 17, 2009, 3:46 p.m. UTC | #2
On Mon, 17 Aug 2009 10:09:56 -0500
Shirish Pargaonkar <shirishpargaonkar@gmail.com> wrote:

> On Mon, Aug 17, 2009 at 7:16 AM, Jeff Layton <jlayton@redhat.com> wrote:
> > [...]
> >                mutex_lock(&cifs_oplock_mutex);
> > -               if (list_empty(&cifs_oplock_list)) {
> > -                       mutex_unlock(&cifs_oplock_mutex);
> > -                       set_current_state(TASK_INTERRUPTIBLE);
> > -                       schedule_timeout(39*HZ);
> > -               } else {
> > -                       oplock_item = list_entry(cifs_oplock_list.next,
> > -                                               struct oplock_q_entry, qhead);
> > +               while(!list_empty(&cifs_oplock_list)) {
> >                        cFYI(1, ("found oplock item to write out"));
> > -                       pTcon = oplock_item->tcon;
> > +                       oplock_item = list_entry(cifs_oplock_list.next,
> > +                                                struct oplock_q_entry, qhead);
> > +                       tcon = oplock_item->tcon;
> >                        inode = oplock_item->pinode;
> >                        netfid = oplock_item->netfid;
> > +                       list_del(&oplock_item->qhead);
> > +                       kmem_cache_free(cifs_oplock_cachep, oplock_item);
> >                        mutex_unlock(&cifs_oplock_mutex);
> 
> It's not very clear with the changes and indentation, but we do take the
> mutex lock within the new while loop, right?
> 

Right. In a later patch, I have the thread hold the mutex while the
call goes out on the wire as well (in order to prevent the tcon from
getting ripped out from under it).

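To make the lock choreography easier to follow than in the indented diff,
the patched loop distills to the pattern below (a paraphrase, not the
verbatim patch; send_oplock_release() is a hypothetical stand-in for the
CIFSSMBLock() call). The pop and free happen under the mutex, the wire
call does not, and the mutex is retaken before the list is tested again:

	mutex_lock(&cifs_oplock_mutex);
	while (!list_empty(&cifs_oplock_list)) {
		oplock_item = list_entry(cifs_oplock_list.next,
					 struct oplock_q_entry, qhead);
		tcon = oplock_item->tcon;	/* copy out what we need */
		netfid = oplock_item->netfid;
		list_del(&oplock_item->qhead);
		kmem_cache_free(cifs_oplock_cachep, oplock_item);
		mutex_unlock(&cifs_oplock_mutex); /* drop across the slow part */

		send_oplock_release(tcon, netfid); /* stand-in for CIFSSMBLock() */

		mutex_lock(&cifs_oplock_mutex);	/* retake before the next test */
	}
	mutex_unlock(&cifs_oplock_mutex);
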
> > [...]
> > +       struct oplock_q_entry *entry, *next;
> >
> >        mutex_lock(&cifs_oplock_mutex);
> > -       list_for_each_entry(temp, &cifs_oplock_list, qhead) {
> > -               if ((temp->tcon) && (temp->tcon == tcon)) {
> > -                       list_del(&temp->qhead);
> > -                       kmem_cache_free(cifs_oplock_cachep, temp);
> > +       list_for_each_entry_safe(entry, next, &cifs_oplock_list, qhead) {
> 
> Just a nitpick: since we do not use next anywhere, maybe use NULL instead?
> 

No. The "next" pointer is used internally by the
list_for_each_entry_safe macro. It's not safe to use
list_for_each_entry if you're removing entries from the list and
freeing them within the loop. The _safe versions of those routines
dereference the "next" pointer before the loop body runs, so they
never have to touch the just-freed entry in order to advance to the
next pass.
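
For reference, the macro is defined along these lines in
include/linux/list.h of this era (approximate): "n" is loaded before the
loop body runs and is what advances the cursor, which is exactly why it
cannot be NULL:

	/* "n" caches the next entry before the body can free "pos" */
	#define list_for_each_entry_safe(pos, n, head, member)			\
		for (pos = list_entry((head)->next, typeof(*pos), member),	\
			n = list_entry(pos->member.next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = n, n = list_entry(n->member.next, typeof(*n), member))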


Patch

diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0d4e0da..ab4b373 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -976,7 +976,7 @@  cifs_destroy_mids(void)
 static int cifs_oplock_thread(void *dummyarg)
 {
 	struct oplock_q_entry *oplock_item;
-	struct cifsTconInfo *pTcon;
+	struct cifsTconInfo *tcon;
 	struct inode *inode;
 	__u16  netfid;
 	int rc, waitrc = 0;
@@ -986,20 +986,24 @@  static int cifs_oplock_thread(void *dummyarg)
 		if (try_to_freeze())
 			continue;
 
+		/*
+		 * can't reasonably use list_for_each macros here. It's
+		 * possible that another thread could come along and remove
+		 * some of the entries while the lock is released. It's fine
+		 * though since we're just popping one off the head on each
+		 * iteration anyway.
+		 */
 		mutex_lock(&cifs_oplock_mutex);
-		if (list_empty(&cifs_oplock_list)) {
-			mutex_unlock(&cifs_oplock_mutex);
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(39*HZ);
-		} else {
-			oplock_item = list_entry(cifs_oplock_list.next,
-						struct oplock_q_entry, qhead);
+		while(!list_empty(&cifs_oplock_list)) {
 			cFYI(1, ("found oplock item to write out"));
-			pTcon = oplock_item->tcon;
+			oplock_item = list_entry(cifs_oplock_list.next,
+						 struct oplock_q_entry, qhead);
+			tcon = oplock_item->tcon;
 			inode = oplock_item->pinode;
 			netfid = oplock_item->netfid;
+			list_del(&oplock_item->qhead);
+			kmem_cache_free(cifs_oplock_cachep, oplock_item);
 			mutex_unlock(&cifs_oplock_mutex);
-			DeleteOplockQEntry(oplock_item);
 			/* can not grab inode sem here since it would
 				deadlock when oplock received on delete
 				since vfs_unlink holds the i_mutex across
@@ -1034,16 +1038,21 @@  static int cifs_oplock_thread(void *dummyarg)
 				not bother sending an oplock release if session
 				to server still is disconnected since oplock
 				already released by the server in that case */
-			if (!pTcon->need_reconnect) {
-				rc = CIFSSMBLock(0, pTcon, netfid,
+			if (!tcon->need_reconnect) {
+				rc = CIFSSMBLock(0, tcon, netfid,
 						0 /* len */ , 0 /* offset */, 0,
 						0, LOCKING_ANDX_OPLOCK_RELEASE,
 						false /* wait flag */);
 				cFYI(1, ("Oplock release rc = %d", rc));
 			}
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(1);  /* yield in case q were corrupt */
+			mutex_lock(&cifs_oplock_mutex);
 		}
+		mutex_unlock(&cifs_oplock_mutex);
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop())
+			break;
+		/* FIXME: why 39s here? Turn this into a #define constant? */
+		schedule_timeout(39*HZ);
 	} while (!kthread_should_stop());
 
 	return 0;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf5..b7554a7 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -88,7 +88,6 @@  extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
 extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
 						 struct cifsTconInfo *);
-extern void DeleteOplockQEntry(struct oplock_q_entry *);
 extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 92e1538..59f0e95 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -126,28 +126,15 @@  AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
 	return temp;
 }
 
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
-	mutex_lock(&cifs_oplock_mutex);
-    /* should we check if list empty first? */
-	list_del(&oplockEntry->qhead);
-	mutex_unlock(&cifs_oplock_mutex);
-	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
 void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
 {
-	struct oplock_q_entry *temp;
-
-	if (tcon == NULL)
-		return;
+	struct oplock_q_entry *entry, *next;
 
 	mutex_lock(&cifs_oplock_mutex);
-	list_for_each_entry(temp, &cifs_oplock_list, qhead) {
-		if ((temp->tcon) && (temp->tcon == tcon)) {
-			list_del(&temp->qhead);
-			kmem_cache_free(cifs_oplock_cachep, temp);
+	list_for_each_entry_safe(entry, next, &cifs_oplock_list, qhead) {
+		if (entry->tcon && entry->tcon == tcon) {
+			list_del(&entry->qhead);
+			kmem_cache_free(cifs_oplock_cachep, entry);
 		}
 	}
 	mutex_unlock(&cifs_oplock_mutex);