[v2,1/4] mm/filemap: Add folio_lock_timeout()

Message ID 20230421151135.v2.1.I2b71e11264c5c214bc59744b9e13e4c353bc5714@changeid (mailing list archive)
State Mainlined, archived
Series migrate: Avoid unbounded blocks in MIGRATE_SYNC_LIGHT

Commit Message

Doug Anderson April 21, 2023, 10:12 p.m. UTC
Add a variant of folio_lock() that can timeout. This is useful to
avoid unbounded waits for the page lock in kcompactd.

Signed-off-by: Douglas Anderson <dianders@chromium.org>
---

Changes in v2:
- "Add folio_lock_timeout()" new for v2.

 include/linux/pagemap.h | 16 ++++++++++++++
 mm/filemap.c            | 47 +++++++++++++++++++++++++++++------------
 2 files changed, 50 insertions(+), 13 deletions(-)
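
For orientation, here is a minimal, hypothetical sketch of how a caller might
use the new API (illustrative code only; per the series title, the actual
MIGRATE_SYNC_LIGHT user presumably arrives in a later patch of the series):

/*
 * Sketch: try to lock a folio, but give up after 100ms so a background
 * task (such as kcompactd) cannot block indefinitely. The function name
 * and timeout value are illustrative, not from this patch.
 */
static int try_lock_with_deadline(struct folio *folio)
{
	long timeout = msecs_to_jiffies(100);	/* illustrative deadline */
	int ret;

	ret = folio_lock_timeout(folio, timeout);
	if (ret)
		return ret;	/* -ETIMEDOUT on timeout, -EINTR on fatal signal */

	/* ... folio is locked here ... */
	folio_unlock(folio);
	return 0;
}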

Comments

Huang, Ying April 23, 2023, 7:50 a.m. UTC | #1
Douglas Anderson <dianders@chromium.org> writes:

> Add a variant of folio_lock() that can timeout. This is useful to
> avoid unbounded waits for the page lock in kcompactd.
>
> Signed-off-by: Douglas Anderson <dianders@chromium.org>
> ---
>
> Changes in v2:
> - "Add folio_lock_timeout()" new for v2.
>
>  include/linux/pagemap.h | 16 ++++++++++++++
>  mm/filemap.c            | 47 +++++++++++++++++++++++++++++------------
>  2 files changed, 50 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 0acb8e1fb7af..0f3ef9f79300 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -892,6 +892,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
>  }
>  
>  void __folio_lock(struct folio *folio);
> +int __folio_lock_timeout(struct folio *folio, long timeout);
>  int __folio_lock_killable(struct folio *folio);
>  bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
>  				unsigned int flags);
> @@ -952,6 +953,21 @@ static inline void folio_lock(struct folio *folio)
>  		__folio_lock(folio);
>  }
>  
> +/**
> + * folio_lock_timeout() - Lock this folio, with a timeout.
> + * @folio: The folio to lock.
> + * @timeout: The timeout in jiffies; %MAX_SCHEDULE_TIMEOUT means wait forever.
> + *
> + * Return: 0 upon success; -ETIMEDOUT upon failure.

IIUC, the function may return -EINTR too.

Otherwise looks good to me.  Thanks!

Best Regards,
Huang, Ying

> + */
> +static inline int folio_lock_timeout(struct folio *folio, long timeout)
> +{
> +	might_sleep();
> +	if (!folio_trylock(folio))
> +		return __folio_lock_timeout(folio, timeout);
> +	return 0;
> +}
> [...]
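
A possible amended kernel-doc reflecting Huang's point (an editor's sketch,
not text from the posted patch):

/**
 * folio_lock_timeout() - Lock this folio, with a timeout.
 * @folio: The folio to lock.
 * @timeout: The timeout in jiffies; %MAX_SCHEDULE_TIMEOUT means wait forever.
 *
 * Return: 0 upon success; -ETIMEDOUT if the timeout expired; -EINTR if a
 * fatal signal arrived first (the wait uses TASK_KILLABLE).
 */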
Mel Gorman April 24, 2023, 8:22 a.m. UTC | #2
On Fri, Apr 21, 2023 at 03:12:45PM -0700, Douglas Anderson wrote:
> Add a variant of folio_lock() that can timeout. This is useful to
> avoid unbounded waits for the page lock in kcompactd.
> 
> Signed-off-by: Douglas Anderson <dianders@chromium.org>
> ---
> 
> Changes in v2:
> - "Add folio_lock_timeout()" new for v2.
> 
>  include/linux/pagemap.h | 16 ++++++++++++++
>  mm/filemap.c            | 47 +++++++++++++++++++++++++++++------------
>  2 files changed, 50 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 0acb8e1fb7af..0f3ef9f79300 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -892,6 +892,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
>  }
>  
>  void __folio_lock(struct folio *folio);
> +int __folio_lock_timeout(struct folio *folio, long timeout);
>  int __folio_lock_killable(struct folio *folio);
>  bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
>  				unsigned int flags);
> @@ -952,6 +953,21 @@ static inline void folio_lock(struct folio *folio)
>  		__folio_lock(folio);
>  }
>  
> +/**
> + * folio_lock_timeout() - Lock this folio, with a timeout.
> + * @folio: The folio to lock.
> + * @timeout: The timeout in jiffies; %MAX_SCHEDULE_TIMEOUT means wait forever.
> + *
> + * Return: 0 upon success; -ETIMEDOUT upon failure.
> + */

May return -EINTR?

> +static inline int folio_lock_timeout(struct folio *folio, long timeout)
> +{
> +	might_sleep();
> +	if (!folio_trylock(folio))
> +		return __folio_lock_timeout(folio, timeout);
> +	return 0;
> +}
> [...]
> @@ -1295,10 +1296,13 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
>  		/* Loop until we've been woken or interrupted */
>  		flags = smp_load_acquire(&wait->flags);
>  		if (!(flags & WQ_FLAG_WOKEN)) {
> +			if (!timeout)
> +				break;
> +

An io_schedule_timeout of 0 is valid so why the special handling? It's
negative timeouts that cause schedule_timeout() to complain.

>  			if (signal_pending_state(state, current))
>  				break;
>  
> -			io_schedule();
> +			timeout = io_schedule_timeout(timeout);
>  			continue;
>  		}
> [...]
Doug Anderson April 24, 2023, 4:22 p.m. UTC | #3
Hi,

On Mon, Apr 24, 2023 at 1:22 AM Mel Gorman <mgorman@techsingularity.net> wrote:
>
> > @@ -1295,10 +1296,13 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
> >               /* Loop until we've been woken or interrupted */
> >               flags = smp_load_acquire(&wait->flags);
> >               if (!(flags & WQ_FLAG_WOKEN)) {
> > +                     if (!timeout)
> > +                             break;
> > +
>
> An io_schedule_timeout of 0 is valid so why the special handling? It's
> negative timeouts that cause schedule_timeout() to complain.

It's not expected that the caller passes in a timeout of 0 here. The
test here actually handles the case that the previous call to
io_schedule_timeout() returned 0. In my patch, after the call to
io_schedule_timeout() we unconditionally "continue" and end up back at
the top of the loop. The next time through the loop if we don't see
the WOKEN flag then we'll check for the two "error" conditions
(timeout or signal pending) and break for either of them.

To make it clearer, I'll add this comment for the next version:

/* Break if the last io_schedule_timeout() said no time left */
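
Put another way, the loop's control flow looks roughly like this (a
simplified editor's sketch of the patched loop, not the literal kernel code):

	for (;;) {
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			/* Last io_schedule_timeout() returned 0: timed out. */
			if (!timeout)
				break;
			/* A (fatal) signal is pending: give up. */
			if (signal_pending_state(state, current))
				break;
			/* Sleep; on wakeup, timeout holds the jiffies left. */
			timeout = io_schedule_timeout(timeout);
			continue;
		}
		/* ... woken: try to grab the lock, possibly loop again ... */
	}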
Mel Gorman April 25, 2023, 8 a.m. UTC | #4
On Mon, Apr 24, 2023 at 09:22:55AM -0700, Doug Anderson wrote:
> Hi,
> 
> On Mon, Apr 24, 2023 at 1:22 AM Mel Gorman <mgorman@techsingularity.net> wrote:
> >
> > > @@ -1295,10 +1296,13 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
> > >               /* Loop until we've been woken or interrupted */
> > >               flags = smp_load_acquire(&wait->flags);
> > >               if (!(flags & WQ_FLAG_WOKEN)) {
> > > +                     if (!timeout)
> > > +                             break;
> > > +
> >
> > An io_schedule_timeout of 0 is valid so why the special handling? It's
> > negative timeouts that cause schedule_timeout() to complain.
> 
> It's not expected that the caller passes in a timeout of 0 here. The
> test here actually handles the case that the previous call to
> io_schedule_timeout() returned 0. In my patch, after the call to
> io_schedule_timeout() we unconditionally "continue" and end up back at
> the top of the loop. The next time through the loop if we don't see
> the WOKEN flag then we'll check for the two "error" conditions
> (timeout or signal pending) and break for either of them.

Ah, I see!

> 
> To make it clearer, I'll add this comment for the next version:
> 
> /* Break if the last io_schedule_timeout() said no time left */

Yes please.
Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0acb8e1fb7af..0f3ef9f79300 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -892,6 +892,7 @@  static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __folio_lock(struct folio *folio);
+int __folio_lock_timeout(struct folio *folio, long timeout);
 int __folio_lock_killable(struct folio *folio);
 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
 				unsigned int flags);
@@ -952,6 +953,21 @@  static inline void folio_lock(struct folio *folio)
 		__folio_lock(folio);
 }
 
+/**
+ * folio_lock_timeout() - Lock this folio, with a timeout.
+ * @folio: The folio to lock.
+ * @timeout: The timeout in jiffies; %MAX_SCHEDULE_TIMEOUT means wait forever.
+ *
+ * Return: 0 upon success; -ETIMEDOUT upon failure.
+ */
+static inline int folio_lock_timeout(struct folio *folio, long timeout)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		return __folio_lock_timeout(folio, timeout);
+	return 0;
+}
+
 /**
  * lock_page() - Lock the folio containing this page.
  * @page: The page to lock.
diff --git a/mm/filemap.c b/mm/filemap.c
index 2723104cc06a..c6056ec41284 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1220,7 +1220,7 @@  static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
 int sysctl_page_lock_unfairness = 5;
 
 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
-		int state, enum behavior behavior)
+		int state, enum behavior behavior, long timeout)
 {
 	wait_queue_head_t *q = folio_waitqueue(folio);
 	int unfairness = sysctl_page_lock_unfairness;
@@ -1229,6 +1229,7 @@  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 	bool thrashing = false;
 	unsigned long pflags;
 	bool in_thrashing;
+	int err;
 
 	if (bit_nr == PG_locked &&
 	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
@@ -1295,10 +1296,13 @@  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 		/* Loop until we've been woken or interrupted */
 		flags = smp_load_acquire(&wait->flags);
 		if (!(flags & WQ_FLAG_WOKEN)) {
+			if (!timeout)
+				break;
+
 			if (signal_pending_state(state, current))
 				break;
 
-			io_schedule();
+			timeout = io_schedule_timeout(timeout);
 			continue;
 		}
 
@@ -1324,10 +1328,10 @@  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 	}
 
 	/*
-	 * If a signal happened, this 'finish_wait()' may remove the last
-	 * waiter from the wait-queues, but the folio waiters bit will remain
-	 * set. That's ok. The next wakeup will take care of it, and trying
-	 * to do it here would be difficult and prone to races.
+	 * If a signal/timeout happened, this 'finish_wait()' may remove the
+	 * last waiter from the wait-queues, but the folio waiters bit will
+	 * remain set. That's ok. The next wakeup will take care of it, and
+	 * trying to do it here would be difficult and prone to races.
 	 */
 	finish_wait(q, wait);
 
@@ -1336,6 +1340,13 @@  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 		psi_memstall_leave(&pflags);
 	}
 
+	/*
+	 * If we don't meet the success criteria below then we've got an error
+	 * of some sort. Differentiate between the two error cases. If there's
+	 * no time left it must have been a timeout.
+	 */
+	err = !timeout ? -ETIMEDOUT : -EINTR;
+
 	/*
 	 * NOTE! The wait->flags weren't stable until we've done the
 	 * 'finish_wait()', and we could have exited the loop above due
@@ -1350,9 +1361,9 @@  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
 	 */
 	if (behavior == EXCLUSIVE)
-		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
+		return wait->flags & WQ_FLAG_DONE ? 0 : err;
 
-	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
+	return wait->flags & WQ_FLAG_WOKEN ? 0 : err;
 }
 
 #ifdef CONFIG_MIGRATION
@@ -1442,13 +1453,15 @@  void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
 
 void folio_wait_bit(struct folio *folio, int bit_nr)
 {
-	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
+	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED,
+			      MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(folio_wait_bit);
 
 int folio_wait_bit_killable(struct folio *folio, int bit_nr)
 {
-	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
+	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED,
+				     MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(folio_wait_bit_killable);
 
@@ -1467,7 +1480,8 @@  EXPORT_SYMBOL(folio_wait_bit_killable);
  */
 static int folio_put_wait_locked(struct folio *folio, int state)
 {
-	return folio_wait_bit_common(folio, PG_locked, state, DROP);
+	return folio_wait_bit_common(folio, PG_locked, state, DROP,
+				     MAX_SCHEDULE_TIMEOUT);
 }
 
 /**
@@ -1662,17 +1676,24 @@  EXPORT_SYMBOL_GPL(page_endio);
 void __folio_lock(struct folio *folio)
 {
 	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
-				EXCLUSIVE);
+				EXCLUSIVE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(__folio_lock);
 
 int __folio_lock_killable(struct folio *folio)
 {
 	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
-					EXCLUSIVE);
+					EXCLUSIVE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL_GPL(__folio_lock_killable);
 
+int __folio_lock_timeout(struct folio *folio, long timeout)
+{
+	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
+					EXCLUSIVE, timeout);
+}
+EXPORT_SYMBOL_GPL(__folio_lock_timeout);
+
 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 {
 	struct wait_queue_head *q = folio_waitqueue(folio);