[v2,14/30] xen/mm: add a ceil suffix to the current page calculation routine

Message ID 1474991845-27962-15-git-send-email-roger.pau@citrix.com (mailing list archive)
State New, archived

Commit Message

Roger Pau Monné Sept. 27, 2016, 3:57 p.m. UTC
... and introduce a floor variant.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 xen/arch/arm/domain.c                    |  2 +-
 xen/arch/arm/domain_build.c              | 16 +++++-----------
 xen/arch/arm/kernel.c                    |  4 ++--
 xen/arch/arm/percpu.c                    |  3 ++-
 xen/arch/x86/domain.c                    |  2 +-
 xen/arch/x86/domain_build.c              |  4 ++--
 xen/arch/x86/hvm/svm/nestedsvm.c         |  8 ++++----
 xen/arch/x86/hvm/svm/vmcb.c              |  5 +++--
 xen/arch/x86/percpu.c                    |  3 ++-
 xen/arch/x86/smpboot.c                   |  4 ++--
 xen/common/kexec.c                       |  2 +-
 xen/common/page_alloc.c                  |  2 +-
 xen/common/tmem_xen.c                    |  2 +-
 xen/common/xmalloc_tlsf.c                |  6 +++---
 xen/drivers/char/console.c               |  6 +++---
 xen/drivers/char/serial.c                |  2 +-
 xen/drivers/passthrough/amd/iommu_init.c | 17 +++++++++--------
 xen/drivers/passthrough/pci.c            |  2 +-
 xen/include/asm-x86/flushtlb.h           |  2 +-
 xen/include/xen/mm.h                     | 12 +++++++++++-
 20 files changed, 56 insertions(+), 48 deletions(-)

Comments

Jan Beulich Sept. 30, 2016, 3:20 p.m. UTC | #1
>>> On 27.09.16 at 17:57, <roger.pau@citrix.com> wrote:
> ... and introduce a floor variant.

I dislike this, not least because of the many places you touch
just to tack that odd suffix on. Unless you plan on adding half a
dozen or more callers of that floor variant, I think it would be
prudent not to introduce such a variant, but instead make those
callers simply obtain what they need by calling the existing one.
After all, gofb_floor(x) = gofb(x + 1) - 1, if I'm not mistaken.

Jan
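
For what it's worth, the identity Jan states is easy to check mechanically. Below is a minimal standalone model of the two helpers (a sketch for illustration, not part of the patch), assuming 4 KiB pages; the ceil body follows the existing get_order_from_bytes() in xen/include/xen/mm.h, the floor body follows the mm.h hunk further down:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model, assuming PAGE_SHIFT == 12 (4 KiB pages). */
#define PAGE_SHIFT 12
typedef uint64_t paddr_t;

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static unsigned int get_order_from_bytes_ceil(paddr_t size)
{
    unsigned int order;

    size = (size - 1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;

    return order;
}

/* Largest order such that (1 << order) pages still fit in 'size' bytes. */
static unsigned int get_order_from_bytes_floor(paddr_t size)
{
    unsigned int order;

    size >>= PAGE_SHIFT;
    for ( order = 0; size >= (1UL << (order + 1)); order++ )
        ;

    return order;
}

int main(void)
{
    paddr_t x;

    /*
     * gofb_floor(x) == gofb(x + 1) - 1 for anything of at least one page,
     * i.e. exactly the open-coded trick the old get_11_allocation_size()
     * comment describes.
     */
    for ( x = 1UL << PAGE_SHIFT; x < (paddr_t)1 << 24; x++ )
        assert(get_order_from_bytes_floor(x) ==
               get_order_from_bytes_ceil(x + 1) - 1);

    puts("identity holds");
    return 0;
}

Compiled and run as-is, the assertion holds over the whole range, matching the get_order_from_bytes(size + 1) - 1 rewrite that get_11_allocation_size() performed before this patch.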

Patch

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 20bb2ba..1f6b0a4 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -661,7 +661,7 @@  void arch_domain_destroy(struct domain *d)
     free_xenheap_page(d->shared_info);
 #ifdef CONFIG_ACPI
     free_xenheap_pages(d->arch.efi_acpi_table,
-                       get_order_from_bytes(d->arch.efi_acpi_len));
+                       get_order_from_bytes_ceil(d->arch.efi_acpi_len));
 #endif
     domain_io_free(d);
 }
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 35ab08d..cabe030 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -73,14 +73,8 @@  struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
 
 static unsigned int get_11_allocation_size(paddr_t size)
 {
-    /*
-     * get_order_from_bytes returns the order greater than or equal to
-     * the given size, but we need less than or equal. Adding one to
-     * the size pushes an evenly aligned size into the next order, so
-     * we can then unconditionally subtract 1 from the order which is
-     * returned.
-     */
-    return get_order_from_bytes(size + 1) - 1;
+
+    return get_order_from_bytes_floor(size);
 }
 
 /*
@@ -238,8 +232,8 @@  fail:
 static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
 {
     const unsigned int min_low_order =
-        get_order_from_bytes(min_t(paddr_t, dom0_mem, MB(128)));
-    const unsigned int min_order = get_order_from_bytes(MB(4));
+        get_order_from_bytes_ceil(min_t(paddr_t, dom0_mem, MB(128)));
+    const unsigned int min_order = get_order_from_bytes_ceil(MB(4));
     struct page_info *pg;
     unsigned int order = get_11_allocation_size(kinfo->unassigned_mem);
     int i;
@@ -1828,7 +1822,7 @@  static int prepare_acpi(struct domain *d, struct kernel_info *kinfo)
     if ( rc != 0 )
         return rc;
 
-    order = get_order_from_bytes(d->arch.efi_acpi_len);
+    order = get_order_from_bytes_ceil(d->arch.efi_acpi_len);
     d->arch.efi_acpi_table = alloc_xenheap_pages(order, 0);
     if ( d->arch.efi_acpi_table == NULL )
     {
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 3f6cce3..0d9986b 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -291,7 +291,7 @@  static __init int kernel_decompress(struct bootmodule *mod)
         return -EFAULT;
 
     output_size = output_length(input, size);
-    kernel_order_out = get_order_from_bytes(output_size);
+    kernel_order_out = get_order_from_bytes_ceil(output_size);
     pages = alloc_domheap_pages(NULL, kernel_order_out, 0);
     if ( pages == NULL )
     {
@@ -463,7 +463,7 @@  static int kernel_elf_probe(struct kernel_info *info,
 
     memset(&info->elf.elf, 0, sizeof(info->elf.elf));
 
-    info->elf.kernel_order = get_order_from_bytes(size);
+    info->elf.kernel_order = get_order_from_bytes_ceil(size);
     info->elf.kernel_img = alloc_xenheap_pages(info->elf.kernel_order, 0);
     if ( info->elf.kernel_img == NULL )
         panic("Cannot allocate temporary buffer for kernel");
diff --git a/xen/arch/arm/percpu.c b/xen/arch/arm/percpu.c
index e545024..954e92f 100644
--- a/xen/arch/arm/percpu.c
+++ b/xen/arch/arm/percpu.c
@@ -7,7 +7,8 @@ 
 
 unsigned long __per_cpu_offset[NR_CPUS];
 #define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+    (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
 
 void __init percpu_init_areas(void)
 {
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 332e7f0..3d70720 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -236,7 +236,7 @@  static unsigned int __init noinline _domain_struct_bits(void)
 struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
-    unsigned int order = get_order_from_bytes(sizeof(*d));
+    unsigned int order = get_order_from_bytes_ceil(sizeof(*d));
 #ifdef CONFIG_BIGMEM
     const unsigned int bits = 0;
 #else
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 78980ae..982bb5f 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -290,7 +290,7 @@  static unsigned long __init compute_dom0_nr_pages(
 
     /* Reserve memory for further dom0 vcpu-struct allocations... */
     avail -= (d->max_vcpus - 1UL)
-             << get_order_from_bytes(sizeof(struct vcpu));
+             << get_order_from_bytes_ceil(sizeof(struct vcpu));
     /* ...and compat_l4's, if needed. */
     if ( is_pv_32bit_domain(d) )
         avail -= d->max_vcpus - 1;
@@ -1172,7 +1172,7 @@  static int __init construct_dom0_pv(
     count = v_end - v_start;
     if ( vinitrd_start )
         count -= PAGE_ALIGN(initrd_len);
-    order = get_order_from_bytes(count);
+    order = get_order_from_bytes_ceil(count);
     if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image");
 
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index f9b38ab..7b3af39 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -101,13 +101,13 @@  int nsvm_vcpu_initialise(struct vcpu *v)
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
 
-    msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+    msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
     svm->ns_cached_msrpm = msrpm;
     if (msrpm == NULL)
         goto err;
     memset(msrpm, 0x0, MSRPM_SIZE);
 
-    msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+    msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
     svm->ns_merged_msrpm = msrpm;
     if (msrpm == NULL)
         goto err;
@@ -141,12 +141,12 @@  void nsvm_vcpu_destroy(struct vcpu *v)
 
     if (svm->ns_cached_msrpm) {
         free_xenheap_pages(svm->ns_cached_msrpm,
-                           get_order_from_bytes(MSRPM_SIZE));
+                           get_order_from_bytes_ceil(MSRPM_SIZE));
         svm->ns_cached_msrpm = NULL;
     }
     if (svm->ns_merged_msrpm) {
         free_xenheap_pages(svm->ns_merged_msrpm,
-                           get_order_from_bytes(MSRPM_SIZE));
+                           get_order_from_bytes_ceil(MSRPM_SIZE));
         svm->ns_merged_msrpm = NULL;
     }
     hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ea014f..c763b75 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -98,7 +98,8 @@  static int construct_vmcb(struct vcpu *v)
                              CR_INTERCEPT_CR8_WRITE);
 
     /* I/O and MSR permission bitmaps. */
-    arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+    arch_svm->msrpm = alloc_xenheap_pages(
+                        get_order_from_bytes_ceil(MSRPM_SIZE), 0);
     if ( arch_svm->msrpm == NULL )
         return -ENOMEM;
     memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
@@ -268,7 +269,7 @@  void svm_destroy_vmcb(struct vcpu *v)
     if ( arch_svm->msrpm != NULL )
     {
         free_xenheap_pages(
-            arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE));
+            arch_svm->msrpm, get_order_from_bytes_ceil(MSRPM_SIZE));
         arch_svm->msrpm = NULL;
     }
 
diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c
index 1c1dad9..d44e7e2 100644
--- a/xen/arch/x86/percpu.c
+++ b/xen/arch/x86/percpu.c
@@ -14,7 +14,8 @@  unsigned long __per_cpu_offset[NR_CPUS];
  * context of PV guests.
  */
 #define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+    (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
 
 void __init percpu_init_areas(void)
 {
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 3a9dd3e..5597675 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -669,7 +669,7 @@  static void cpu_smpboot_free(unsigned int cpu)
 
     free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
 
-    order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+    order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
     free_xenheap_pages(idt_tables[cpu], order);
     idt_tables[cpu] = NULL;
 
@@ -710,7 +710,7 @@  static int cpu_smpboot_alloc(unsigned int cpu)
     memcpy(gdt, boot_cpu_compat_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 
-    order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+    order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
     idt_tables[cpu] = alloc_xenheap_pages(order, memflags);
     if ( idt_tables[cpu] == NULL )
         goto oom;
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index c83d48f..f557475 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -556,7 +556,7 @@  static int __init kexec_init(void)
         crash_heap_size = PAGE_ALIGN(crash_heap_size);
 
         crash_heap_current = alloc_xenheap_pages(
-            get_order_from_bytes(crash_heap_size),
+            get_order_from_bytes_ceil(crash_heap_size),
             MEMF_bits(crashinfo_maxaddr_bits) );
 
         if ( ! crash_heap_current )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ae2476d..7f0381e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -553,7 +553,7 @@  static unsigned long init_node_heap(int node, unsigned long mfn,
         *use_tail = 0;
     }
 #endif
-    else if ( get_order_from_bytes(sizeof(**_heap)) ==
+    else if ( get_order_from_bytes_ceil(sizeof(**_heap)) ==
               get_order_from_pages(needed) )
     {
         _heap[node] = alloc_xenheap_pages(get_order_from_pages(needed), 0);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 71cb7d5..6c630b6 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -292,7 +292,7 @@  int __init tmem_init(void)
     unsigned int cpu;
 
     dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
-    workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
+    workmem_order = get_order_from_bytes_ceil(LZO1X_1_MEM_COMPRESS);
 
     for_each_online_cpu ( cpu )
     {
diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index 6c1b882..32800e1 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -298,7 +298,7 @@  struct xmem_pool *xmem_pool_create(
     BUG_ON(max_size && (max_size < init_size));
 
     pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
-    pool_order = get_order_from_bytes(pool_bytes);
+    pool_order = get_order_from_bytes_ceil(pool_bytes);
 
     pool = (void *)alloc_xenheap_pages(pool_order, 0);
     if ( pool == NULL )
@@ -371,7 +371,7 @@  void xmem_pool_destroy(struct xmem_pool *pool)
     spin_unlock(&pool_list_lock);
 
     pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
-    pool_order = get_order_from_bytes(pool_bytes);
+    pool_order = get_order_from_bytes_ceil(pool_bytes);
     free_xenheap_pages(pool,pool_order);
 }
 
@@ -530,7 +530,7 @@  static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
     unsigned int i, order;
     void *res, *p;
 
-    order = get_order_from_bytes(max(align, size));
+    order = get_order_from_bytes_ceil(max(align, size));
 
     res = alloc_xenheap_pages(order, 0);
     if ( res == NULL )
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 55ae31a..605639e 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -301,7 +301,7 @@  static void dump_console_ring_key(unsigned char key)
 
     /* create a buffer in which we'll copy the ring in the correct
        order and NUL terminate */
-    order = get_order_from_bytes(conring_size + 1);
+    order = get_order_from_bytes_ceil(conring_size + 1);
     buf = alloc_xenheap_pages(order, 0);
     if ( buf == NULL )
     {
@@ -759,7 +759,7 @@  void __init console_init_ring(void)
     if ( !opt_conring_size )
         return;
 
-    order = get_order_from_bytes(max(opt_conring_size, conring_size));
+    order = get_order_from_bytes_ceil(max(opt_conring_size, conring_size));
     memflags = MEMF_bits(crashinfo_maxaddr_bits);
     while ( (ring = alloc_xenheap_pages(order, memflags)) == NULL )
     {
@@ -1080,7 +1080,7 @@  static int __init debugtrace_init(void)
     if ( bytes == 0 )
         return 0;
 
-    order = get_order_from_bytes(bytes);
+    order = get_order_from_bytes_ceil(bytes);
     debugtrace_buf = alloc_xenheap_pages(order, 0);
     ASSERT(debugtrace_buf != NULL);
 
diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c
index 0fc5ced..5ac75bb 100644
--- a/xen/drivers/char/serial.c
+++ b/xen/drivers/char/serial.c
@@ -577,7 +577,7 @@  void __init serial_async_transmit(struct serial_port *port)
     while ( serial_txbufsz & (serial_txbufsz - 1) )
         serial_txbufsz &= serial_txbufsz - 1;
     port->txbuf = alloc_xenheap_pages(
-        get_order_from_bytes(serial_txbufsz), 0);
+        get_order_from_bytes_ceil(serial_txbufsz), 0);
 }
 
 /*
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index ea9f7e7..696ff1a 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -136,7 +136,8 @@  static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
+    power_of2_entries =
+        get_order_from_bytes_ceil(iommu->cmd_buffer.alloc_size) +
         IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -164,7 +165,7 @@  static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+    power_of2_entries = get_order_from_bytes_ceil(iommu->event_log.alloc_size) +
                         IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -192,7 +193,7 @@  static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu)
     iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
     writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET);
 
-    power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) +
+    power_of2_entries = get_order_from_bytes_ceil(iommu->ppr_log.alloc_size) +
                         IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE;
 
     entry = 0;
@@ -918,7 +919,7 @@  static void __init deallocate_buffer(void *buf, uint32_t sz)
     int order = 0;
     if ( buf )
     {
-        order = get_order_from_bytes(sz);
+        order = get_order_from_bytes_ceil(sz);
         __free_amd_iommu_tables(buf, order);
     }
 }
@@ -940,7 +941,7 @@  static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
 static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
 {
     void * buffer;
-    int order = get_order_from_bytes(alloc_size);
+    int order = get_order_from_bytes_ceil(alloc_size);
 
     buffer = __alloc_amd_iommu_tables(order);
 
@@ -963,8 +964,8 @@  static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
 
     spin_lock_init(&ring_buf->lock);
     
-    ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
-                                                             entry_size);
+    ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes_ceil(entries *
+                                                                  entry_size);
     ring_buf->entries = ring_buf->alloc_size / entry_size;
     ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
     return ring_buf->buffer;
@@ -1163,7 +1164,7 @@  static int __init amd_iommu_setup_device_table(
 
     /* allocate 'device table' on a 4K boundary */
     device_table.alloc_size = PAGE_SIZE <<
-                              get_order_from_bytes(
+                              get_order_from_bytes_ceil(
                               PAGE_ALIGN(ivrs_bdf_entries *
                               IOMMU_DEV_TABLE_ENTRY_SIZE));
     device_table.entries = device_table.alloc_size /
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 338d6b4..dd291a2 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -460,7 +460,7 @@  int __init pci_ro_device(int seg, int bus, int devfn)
     {
         size_t sz = BITS_TO_LONGS(PCI_BDF(-1, -1, -1) + 1) * sizeof(long);
 
-        pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes(sz), 0);
+        pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes_ceil(sz), 0);
         if ( !pseg->ro_map )
             return -ENOMEM;
         memset(pseg->ro_map, 0, sz);
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 2e7ed6b..45d6b0a 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -125,7 +125,7 @@  static inline int invalidate_dcache_va_range(const void *p,
 static inline int clean_and_invalidate_dcache_va_range(const void *p,
                                                        unsigned long size)
 {
-    unsigned int order = get_order_from_bytes(size);
+    unsigned int order = get_order_from_bytes_ceil(size);
     /* sub-page granularity support needs to be added if necessary */
     flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
     return 0;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 76fbb82..5357a08 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -519,7 +519,7 @@  page_list_splice(struct page_list_head *list, struct page_list_head *head)
     list_for_each_entry_safe_reverse(pos, tmp, head, list)
 #endif
 
-static inline unsigned int get_order_from_bytes(paddr_t size)
+static inline unsigned int get_order_from_bytes_ceil(paddr_t size)
 {
     unsigned int order;
 
@@ -530,6 +530,16 @@  static inline unsigned int get_order_from_bytes(paddr_t size)
     return order;
 }
 
+static inline unsigned int get_order_from_bytes_floor(paddr_t size)
+{
+    unsigned int order;
+
+    size >>= PAGE_SHIFT;
+    for ( order = 0; size >= (1 << (order + 1)); order++ );
+
+    return order;
+}
+
 static inline unsigned int get_order_from_pages(unsigned long nr_pages)
 {
     unsigned int order;