Similar to the lruvec lock, we use the same approach to make the split
queue lock safe when LRU pages are reparented.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/huge_memory.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

@@ -535,9 +535,22 @@ static struct deferred_split *folio_split_queue_lock(struct folio *folio)
{
struct deferred_split *queue;
+ rcu_read_lock();
+retry:
queue = folio_split_queue(folio);
spin_lock(&queue->split_queue_lock);
+ if (unlikely(split_queue_memcg(queue) != folio_memcg(folio))) {
+ spin_unlock(&queue->split_queue_lock);
+ goto retry;
+ }
+
+ /*
+ * Preemption is disabled inside spin_lock(), so the locked region can
+ * serve as an RCU read-side critical section.
+ */
+ rcu_read_unlock();
+
return queue;
}
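
For readers without the rest of the series at hand: the two helpers used
above, folio_split_queue() and split_queue_memcg(), are not defined in this
hunk. A minimal sketch of their assumed shape, inferred from how they are
used here (the bodies below are illustrative, ignoring the !CONFIG_MEMCG
build for brevity):

/*
 * Map a folio to the split queue of its current memcg, falling back to
 * the per-node queue when the folio is not charged. Must run under RCU,
 * since a reparented memcg (and thus its queue) may be freed after a
 * grace period.
 */
static struct deferred_split *folio_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	return memcg ? &memcg->deferred_split_queue :
		       &NODE_DATA(folio_nid(folio))->deferred_split_queue;
}

/* Map a queue back to its owning memcg so callers can detect races. */
static struct mem_cgroup *split_queue_memcg(struct deferred_split *queue)
{
	if (mem_cgroup_disabled())
		return NULL;
	return container_of(queue, struct mem_cgroup, deferred_split_queue);
}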
@@ -546,9 +559,19 @@ folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
{
struct deferred_split *queue;
+ rcu_read_lock();
+retry:
queue = folio_split_queue(folio);
spin_lock_irqsave(&queue->split_queue_lock, *flags);
+ if (unlikely(split_queue_memcg(queue) != folio_memcg(folio))) {
+ spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+ goto retry;
+ }
+
+ /* See the comment in folio_split_queue_lock(). */
+ rcu_read_unlock();
+
return queue;
}
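
To show the intended pairing, here is a hedged usage sketch in the spirit
of deferred_split_folio(); the function name queue_folio_for_split is made
up for illustration, while the deferred_split fields (split_queue,
split_queue_len) and folio->_deferred_list exist in the current code:

static void queue_folio_for_split(struct folio *folio)
{
	struct deferred_split *queue;
	unsigned long flags;

	queue = folio_split_queue_lock_irqsave(folio, &flags);
	/*
	 * The returned queue matched folio_memcg(folio) at the moment the
	 * lock was taken, so a plain unlock on it is safe.
	 */
	if (list_empty(&folio->_deferred_list)) {
		list_add_tail(&folio->_deferred_list, &queue->split_queue);
		queue->split_queue_len++;
	}
	spin_unlock_irqrestore(&queue->split_queue_lock, flags);
}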
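
The retry loop guards against the window between folio_split_queue() and
acquiring the lock, during which the folio can be reparented to another
memcg. A sketch of that writer side, loosely modeled on the reparenting
step elsewhere in the series (the name and body are illustrative, not part
of this patch):

static void reparent_deferred_split_queue(struct mem_cgroup *memcg,
					  struct mem_cgroup *parent)
{
	struct deferred_split *src = &memcg->deferred_split_queue;
	struct deferred_split *dst = &parent->deferred_split_queue;

	spin_lock(&src->split_queue_lock);
	spin_lock_nested(&dst->split_queue_lock, SINGLE_DEPTH_NESTING);
	list_splice_tail_init(&src->split_queue, &dst->split_queue);
	dst->split_queue_len += src->split_queue_len;
	src->split_queue_len = 0;
	spin_unlock(&dst->split_queue_lock);
	spin_unlock(&src->split_queue_lock);
}

Because the reparenting side holds both queue locks while folios move, a
reader that takes one queue lock and then re-verifies that
split_queue_memcg(queue) == folio_memcg(folio) either sees a stable
association or retries.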