--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -30,6 +30,9 @@ static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
+
+#define untagged_addr(addr) (addr)
+#define untagged_ptr(ptr) (ptr)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_H */
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -90,6 +90,32 @@ static __always_inline unsigned long task_size_max(void)
}
#endif /* CONFIG_X86_5LEVEL */
+#ifdef CONFIG_CPU_SUP_AMD
+/*
+ * The tag bits are in the same position, [63:57], for both 4- and 5-level
+ * page tables. In both cases we sign-extend from bit 56, since bits
+ * [56:48] are expected to be canonical anyway for 4-level page tables.
+ */
+#define __untagged_addr(addr) \
+ ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 56))
+
+#define untagged_addr(addr) ({ \
+ u64 __addr = (__force u64)(addr); \
+ __addr &= __untagged_addr(__addr); \
+ (__force __typeof__(addr))__addr; \
+})
+
+#define untagged_ptr(ptr) ({ \
+ u64 __ptrval = (__force u64)(ptr); \
+ __ptrval = untagged_addr(__ptrval); \
+ (__force __typeof__(*(ptr)) *)__ptrval; \
+})
+
+#else
+#define untagged_addr(addr) (addr)
+#define untagged_ptr(ptr) (ptr)
+#endif
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
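
[Editorial note, not part of the patch: a minimal userspace sketch of the
sign-extend-and-mask trick used by untagged_addr() above. sign_extend64()
is reimplemented locally, since the kernel helper from <linux/bitops.h> is
not available in userspace, and the example addresses are invented.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend64(): sign-extend
 * @value from bit @index upward, like the helper in <linux/bitops.h>. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	/* Invented tagged user address: tag 0x55 in bits [63:57] on top
	 * of a canonical 4-level user address (so bit 56 is 0). */
	uint64_t user = (0x55ULL << 57) | 0x00007fffdeadbeefULL;
	/* Invented kernel address: bits [63:57] are already all ones. */
	uint64_t kern = 0xffffffff8048b000ULL;

	/* The untagged_addr() logic: AND the address with its own sign
	 * extension from bit 56.  For a user address bit 56 is 0, so the
	 * AND clears the tag bits [63:57]; for a kernel address bit 56 is
	 * 1, so the mask is all ones and the address passes through
	 * unchanged. */
	uint64_t u = user & (uint64_t)sign_extend64(user, 56);
	uint64_t k = kern & (uint64_t)sign_extend64(kern, 56);

	printf("user %#018" PRIx64 " -> %#018" PRIx64 "\n", user, u);
	printf("kern %#018" PRIx64 " -> %#018" PRIx64 "\n", kern, k);
	return 0;
}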
untagged_addr() will be used by core mm routines to remove the tag bits
and convert the address to canonical form. Limit the implementation to
AMD CPUs, as Intel's version of the same is likely to be different.

Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 arch/x86/include/asm/page_32.h |  3 +++
 arch/x86/include/asm/page_64.h | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)
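
[Editorial usage sketch, again with invented names and tag values: the
same GNU statement-expression pattern lets untagged_ptr() strip the tag
bits while preserving the pointer's C type via __typeof__, so a caller
can dereference the cleaned pointer directly.]

#include <assert.h>
#include <stdint.h>

/* Local stand-in for the kernel's sign_extend64(), as above. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

/* Userspace analogue of the patch's untagged_ptr(): strip the tag
 * bits but keep the pointee type. */
#define untagged_ptr(ptr) ({						\
	uint64_t __ptrval = (uint64_t)(ptr);				\
	__ptrval &= (uint64_t)sign_extend64(__ptrval, 56);		\
	(__typeof__(*(ptr)) *)__ptrval;					\
})

int main(void)
{
	int buf[4] = { 1, 2, 3, 4 };
	/* Plant an invented tag value (0x2a) in bits [63:57]. */
	int *tagged = (int *)((uint64_t)buf | (0x2aULL << 57));
	int *clean = untagged_ptr(tagged);

	assert(clean == buf);	/* tag stripped, type preserved */
	assert(clean[2] == 3);	/* safe to dereference again */
	return 0;
}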