@@ -5957,8 +5957,8 @@ void destroy_xen_mappings(unsigned long s, unsigned long e)
unsigned int i;
unsigned long v = s;
- ASSERT((s & ~PAGE_MASK) == 0);
- ASSERT((e & ~PAGE_MASK) == 0);
+ ASSERT(IS_ALIGNED(s, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(e, PAGE_SIZE));
while ( v < e )
{
@@ -6369,8 +6369,8 @@ static void __memguard_change_range(void *p, unsigned long l, int guard)
unsigned int flags = __PAGE_HYPERVISOR_RW | MAP_SMALL_PAGES;
/* Ensure we are dealing with a page-aligned whole number of pages. */
- ASSERT((_p&~PAGE_MASK) == 0);
- ASSERT((_l&~PAGE_MASK) == 0);
+ ASSERT(IS_ALIGNED(_p, PAGE_SIZE));
+ ASSERT(IS_ALIGNED(_l, PAGE_SIZE));
if ( guard )
flags &= ~_PAGE_PRESENT;
@@ -229,4 +229,4 @@ ASSERT(__image_base__ > XEN_VIRT_START ||
ASSERT(kexec_reloc_size - kexec_reloc <= PAGE_SIZE, "kexec_reloc is too large")
#endif
-ASSERT((cpu0_stack & (STACK_SIZE - 1)) == 0, "cpu0_stack misaligned")
+ASSERT(IS_ALIGNED(cpu0_stack, STACK_SIZE), "cpu0_stack misaligned")
@@ -74,6 +74,8 @@
#define MB(_mb) (_AC(_mb, ULL) << 20)
#define GB(_gb) (_AC(_gb, ULL) << 30)
+#define IS_ALIGNED(val, align) (((val) & ((align) - 1)) == 0)
+
#define __STR(...) #__VA_ARGS__
#define STR(...) __STR(__VA_ARGS__)
@@ -23,8 +23,7 @@
#endif
typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */
-#define IS_PAGE_ALIGNED(addr) \
- ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr)
+#define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
#define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) )
extern struct page_list_head tmem_page_list;
And a few open-coded alignment checks which I encountered.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Ian Campbell <Ian.Campbell@citrix.com>
---
 xen/arch/x86/mm.c          | 8 ++++----
 xen/arch/x86/xen.lds.S     | 2 +-
 xen/include/xen/config.h   | 2 ++
 xen/include/xen/tmem_xen.h | 3 +-
 4 files changed, 8 insertions(+), 7 deletions(-)