@@ -347,6 +347,24 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
dsb(ish);
}
+static inline void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
+{
+ /* nothing to do */
+}
+
+static inline void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ /* nothing to do */
+}
+
+static inline bool arch_tlbbatch_done(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ /* The kernel can always consider the TLB batch done. */
+ return true;
+}
+
/*
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
This is preparation for the migrc mechanism, which needs to manipulate
the TLB batch's arch data. Even though arm64 does nothing in these
hooks, an arch selecting CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
should provide the APIs.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 arch/arm64/include/asm/tlbflush.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
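
Note (not part of this patch): for comparison, below is a rough sketch of
what these hooks might look like on an arch that does keep per-batch state,
e.g. x86, where struct arch_tlbflush_unmap_batch carries a cpumask of CPUs
with pending flushes. This is only an illustration using the generic cpumask
helpers and assumed fold/clear/done semantics; the actual x86 implementation
in this series may differ.

static inline void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
{
	/* Forget any CPUs recorded for this batch. */
	cpumask_clear(&batch->cpumask);
}

static inline void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
				      struct arch_tlbflush_unmap_batch *bsrc)
{
	/* Accumulate bsrc's pending CPUs into bdst. */
	cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
}

static inline bool arch_tlbbatch_done(struct arch_tlbflush_unmap_batch *bdst,
				      struct arch_tlbflush_unmap_batch *bsrc)
{
	/*
	 * bsrc can be considered done if every CPU it needs to flush
	 * has already been covered by bdst.
	 */
	return cpumask_subset(&bsrc->cpumask, &bdst->cpumask);
}

On arm64 the batch carries no such state, since the TLBI broadcasts are
issued as pages are added and arch_tlbbatch_flush() only needs the dsb(ish),
which is why the stubs above are empty and done() returns true.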