diff mbox series

[3/4] mm: mmu_gather: turn delayed rmap macros into inlines

Message ID Y3SWzbS4OJoz6ppv@li-4a3a4a4c-28e5-11b2-a85c-a8d192c6f089.ibm.com (mailing list archive)
State New
Headers show
Series mm: mmu_gather: do not expose delayed_rmap flag | expand

Commit Message

Alexander Gordeev Nov. 16, 2022, 7:52 a.m. UTC
Turn tlb_delay_rmap() and friend macros into inline functions
by using forward declarations, which allows defining them
after the 'struct mmu_gather' definition.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 include/asm-generic/tlb.h | 56 ++++++++++++++++++++++++++++++---------
 1 file changed, 44 insertions(+), 12 deletions(-)
diff mbox series

Patch

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 317bef9eee3c..33943a4de5a7 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -261,13 +261,10 @@  extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
 				   int page_size);
 
 #ifdef CONFIG_SMP
-/*
- * This both sets 'delayed_rmap', and returns true. It would be an inline
- * function, except we define it before the 'struct mmu_gather'.
- */
-#define tlb_delay_rmap(tlb)		(((tlb)->delayed_rmap = 1), true)
-#define tlb_reset_delay_rmap(tlb)	((tlb)->delayed_rmap = 0)
-#define tlb_rmap_delayed(tlb)		((tlb)->delayed_rmap)
+#define tlb_delay_rmap tlb_delay_rmap
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb);
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb);
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb);
 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
 #endif
 
@@ -338,6 +335,27 @@  struct mmu_gather {
 #endif
 };
 
+#ifdef tlb_delay_rmap
+
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+{
+	tlb->delayed_rmap = 1;
+
+	return true;
+}
+
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb)
+{
+	tlb->delayed_rmap = 0;
+}
+
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb)
+{
+	return tlb->delayed_rmap;
+}
+
+#else
+
 /*
  * We have a no-op version of the rmap removal that doesn't
  * delay anything. That is used on S390, which flushes remote
@@ -345,11 +363,25 @@  struct mmu_gather {
  * remote TLBs to flush and is not preemptible due to this
  * all happening under the page table lock.
  */
-#ifndef tlb_delay_rmap
-#define tlb_delay_rmap(tlb)		(false)
-#define tlb_reset_delay_rmap(tlb)	do { } while (0)
-#define tlb_rmap_delayed(tlb)		(false)
-static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+#define tlb_delay_rmap tlb_delay_rmap
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+{
+	return false;
+}
+
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb)
+{
+}
+
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb)
+{
+	return false;
+}
+
+static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+}
+
 #endif
 
 void tlb_flush_mmu(struct mmu_gather *tlb);