@@ -295,21 +295,20 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
-#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __virt_to_pgoff(kaddr) (((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ({ \
unsigned long __addr = \
- ((__page_to_voff(page)) | PAGE_OFFSET); \
+ ((__page_to_voff(page)) + PAGE_OFFSET); \
unsigned long __addr_tag = \
__tag_set(__addr, page_kasan_tag(page)); \
((void *)__addr_tag); \
})
-#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START))
-#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
- + PHYS_OFFSET) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr) pfn_valid(__virt_to_phys((u64)(kaddr)) >> PAGE_SHIFT)
#endif
#endif
Currently there are assumptions about the alignment of VMEMMAP_START
and PAGE_OFFSET that won't be valid after this series is applied.

These assumptions are in the form of bitwise operators being used
instead of addition and subtraction when calculating addresses.

This patch replaces these bitwise operators with addition and
subtraction.

Signed-off-by: Steve Capper <steve.capper@arm.com>
---
New in V4
---
 arch/arm64/include/asm/memory.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
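
For illustration only (not part of the patch): the bitwise forms being
removed are only equivalent to addition/subtraction while the base
address has no bits set in the range covered by the offset, i.e. while
OR never needs a carry. Below is a minimal user-space sketch of the
failure mode, using made-up base values; the real PAGE_OFFSET and
VMEMMAP_START values depend on the rest of this series.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t off = 0x123456000ULL;	/* arbitrary offset into the region */

	/* Base with all low bits clear: no bit overlap with the offset,
	 * so OR and ADD produce the same address. */
	uint64_t aligned = 0xffff000000000000ULL;	/* hypothetical base */
	printf("aligned base:   or=0x%016llx add=0x%016llx\n",
	       (unsigned long long)(off | aligned),
	       (unsigned long long)(off + aligned));

	/* Base with low bits set: OR merges overlapping bits instead of
	 * carrying, so it produces a different (wrong) address to ADD. */
	uint64_t unaligned = 0xffff000000000000ULL - 0x40000000ULL;
	printf("unaligned base: or=0x%016llx add=0x%016llx\n",
	       (unsigned long long)(off | unaligned),
	       (unsigned long long)(off + unaligned));

	return 0;
}

With the aligned base both expressions print 0xffff000123456000; with
the unaligned base OR yields 0xfffeffffe3456000 while ADD yields the
intended 0xffff0000e3456000. This is why the patch switches the address
calculations over to addition and subtraction once the alignment
guarantees go away.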