@@ -661,7 +661,7 @@ void arch_domain_destroy(struct domain *d)
free_xenheap_page(d->shared_info);
#ifdef CONFIG_ACPI
free_xenheap_pages(d->arch.efi_acpi_table,
- get_order_from_bytes(d->arch.efi_acpi_len));
+ get_order_from_bytes_ceil(d->arch.efi_acpi_len));
#endif
domain_io_free(d);
}
@@ -73,14 +73,8 @@ struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
static unsigned int get_11_allocation_size(paddr_t size)
{
- /*
- * get_order_from_bytes returns the order greater than or equal to
- * the given size, but we need less than or equal. Adding one to
- * the size pushes an evenly aligned size into the next order, so
- * we can then unconditionally subtract 1 from the order which is
- * returned.
- */
- return get_order_from_bytes(size + 1) - 1;
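+ /* The allocation must not exceed the remaining size, hence the floor order. */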
+ return get_order_from_bytes_floor(size);
}
/*
@@ -238,8 +232,8 @@ fail:
static void allocate_memory(struct domain *d, struct kernel_info *kinfo)
{
const unsigned int min_low_order =
- get_order_from_bytes(min_t(paddr_t, dom0_mem, MB(128)));
- const unsigned int min_order = get_order_from_bytes(MB(4));
+ get_order_from_bytes_ceil(min_t(paddr_t, dom0_mem, MB(128)));
+ const unsigned int min_order = get_order_from_bytes_ceil(MB(4));
struct page_info *pg;
unsigned int order = get_11_allocation_size(kinfo->unassigned_mem);
int i;
@@ -1828,7 +1822,7 @@ static int prepare_acpi(struct domain *d, struct kernel_info *kinfo)
if ( rc != 0 )
return rc;
- order = get_order_from_bytes(d->arch.efi_acpi_len);
+ order = get_order_from_bytes_ceil(d->arch.efi_acpi_len);
d->arch.efi_acpi_table = alloc_xenheap_pages(order, 0);
if ( d->arch.efi_acpi_table == NULL )
{
@@ -291,7 +291,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
return -EFAULT;
output_size = output_length(input, size);
- kernel_order_out = get_order_from_bytes(output_size);
+ kernel_order_out = get_order_from_bytes_ceil(output_size);
pages = alloc_domheap_pages(NULL, kernel_order_out, 0);
if ( pages == NULL )
{
@@ -463,7 +463,7 @@ static int kernel_elf_probe(struct kernel_info *info,
memset(&info->elf.elf, 0, sizeof(info->elf.elf));
- info->elf.kernel_order = get_order_from_bytes(size);
+ info->elf.kernel_order = get_order_from_bytes_ceil(size);
info->elf.kernel_img = alloc_xenheap_pages(info->elf.kernel_order, 0);
if ( info->elf.kernel_img == NULL )
panic("Cannot allocate temporary buffer for kernel");
@@ -7,7 +7,8 @@
unsigned long __per_cpu_offset[NR_CPUS];
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+ (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
void __init percpu_init_areas(void)
{
@@ -236,7 +236,7 @@ static unsigned int __init noinline _domain_struct_bits(void)
struct domain *alloc_domain_struct(void)
{
struct domain *d;
- unsigned int order = get_order_from_bytes(sizeof(*d));
+ unsigned int order = get_order_from_bytes_ceil(sizeof(*d));
#ifdef CONFIG_BIGMEM
const unsigned int bits = 0;
#else
@@ -290,7 +290,7 @@ static unsigned long __init compute_dom0_nr_pages(
/* Reserve memory for further dom0 vcpu-struct allocations... */
avail -= (d->max_vcpus - 1UL)
- << get_order_from_bytes(sizeof(struct vcpu));
+ << get_order_from_bytes_ceil(sizeof(struct vcpu));
/* ...and compat_l4's, if needed. */
if ( is_pv_32bit_domain(d) )
avail -= d->max_vcpus - 1;
@@ -1172,7 +1172,7 @@ static int __init construct_dom0_pv(
count = v_end - v_start;
if ( vinitrd_start )
count -= PAGE_ALIGN(initrd_len);
- order = get_order_from_bytes(count);
+ order = get_order_from_bytes_ceil(count);
if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )
panic("Domain 0 allocation is too small for kernel image");
@@ -101,13 +101,13 @@ int nsvm_vcpu_initialise(struct vcpu *v)
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
svm->ns_cached_msrpm = msrpm;
if (msrpm == NULL)
goto err;
memset(msrpm, 0x0, MSRPM_SIZE);
- msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ msrpm = alloc_xenheap_pages(get_order_from_bytes_ceil(MSRPM_SIZE), 0);
svm->ns_merged_msrpm = msrpm;
if (msrpm == NULL)
goto err;
@@ -141,12 +141,12 @@ void nsvm_vcpu_destroy(struct vcpu *v)
if (svm->ns_cached_msrpm) {
free_xenheap_pages(svm->ns_cached_msrpm,
- get_order_from_bytes(MSRPM_SIZE));
+ get_order_from_bytes_ceil(MSRPM_SIZE));
svm->ns_cached_msrpm = NULL;
}
if (svm->ns_merged_msrpm) {
free_xenheap_pages(svm->ns_merged_msrpm,
- get_order_from_bytes(MSRPM_SIZE));
+ get_order_from_bytes_ceil(MSRPM_SIZE));
svm->ns_merged_msrpm = NULL;
}
hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
@@ -98,7 +98,8 @@ static int construct_vmcb(struct vcpu *v)
CR_INTERCEPT_CR8_WRITE);
/* I/O and MSR permission bitmaps. */
- arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
+ arch_svm->msrpm = alloc_xenheap_pages(
+ get_order_from_bytes_ceil(MSRPM_SIZE), 0);
if ( arch_svm->msrpm == NULL )
return -ENOMEM;
memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
@@ -268,7 +269,7 @@ void svm_destroy_vmcb(struct vcpu *v)
if ( arch_svm->msrpm != NULL )
{
free_xenheap_pages(
- arch_svm->msrpm, get_order_from_bytes(MSRPM_SIZE));
+ arch_svm->msrpm, get_order_from_bytes_ceil(MSRPM_SIZE));
arch_svm->msrpm = NULL;
}
@@ -14,7 +14,8 @@ unsigned long __per_cpu_offset[NR_CPUS];
* context of PV guests.
*/
#define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
-#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
+#define PERCPU_ORDER \
+ (get_order_from_bytes_ceil(__per_cpu_data_end-__per_cpu_start))
void __init percpu_init_areas(void)
{
@@ -669,7 +669,7 @@ static void cpu_smpboot_free(unsigned int cpu)
free_xenheap_pages(per_cpu(compat_gdt_table, cpu), order);
- order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+ order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
free_xenheap_pages(idt_tables[cpu], order);
idt_tables[cpu] = NULL;
@@ -710,7 +710,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
memcpy(gdt, boot_cpu_compat_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
- order = get_order_from_bytes(IDT_ENTRIES * sizeof(idt_entry_t));
+ order = get_order_from_bytes_ceil(IDT_ENTRIES * sizeof(idt_entry_t));
idt_tables[cpu] = alloc_xenheap_pages(order, memflags);
if ( idt_tables[cpu] == NULL )
goto oom;
@@ -556,7 +556,7 @@ static int __init kexec_init(void)
crash_heap_size = PAGE_ALIGN(crash_heap_size);
crash_heap_current = alloc_xenheap_pages(
- get_order_from_bytes(crash_heap_size),
+ get_order_from_bytes_ceil(crash_heap_size),
MEMF_bits(crashinfo_maxaddr_bits) );
if ( ! crash_heap_current )
@@ -553,7 +553,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
*use_tail = 0;
}
#endif
- else if ( get_order_from_bytes(sizeof(**_heap)) ==
+ else if ( get_order_from_bytes_ceil(sizeof(**_heap)) ==
get_order_from_pages(needed) )
{
_heap[node] = alloc_xenheap_pages(get_order_from_pages(needed), 0);
@@ -292,7 +292,7 @@ int __init tmem_init(void)
unsigned int cpu;
dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
- workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);
+ workmem_order = get_order_from_bytes_ceil(LZO1X_1_MEM_COMPRESS);
for_each_online_cpu ( cpu )
{
@@ -298,7 +298,7 @@ struct xmem_pool *xmem_pool_create(
BUG_ON(max_size && (max_size < init_size));
pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
- pool_order = get_order_from_bytes(pool_bytes);
+ pool_order = get_order_from_bytes_ceil(pool_bytes);
pool = (void *)alloc_xenheap_pages(pool_order, 0);
if ( pool == NULL )
@@ -371,7 +371,7 @@ void xmem_pool_destroy(struct xmem_pool *pool)
spin_unlock(&pool_list_lock);
pool_bytes = ROUNDUP_SIZE(sizeof(*pool));
- pool_order = get_order_from_bytes(pool_bytes);
+ pool_order = get_order_from_bytes_ceil(pool_bytes);
free_xenheap_pages(pool,pool_order);
}
@@ -530,7 +530,7 @@ static void *xmalloc_whole_pages(unsigned long size, unsigned long align)
unsigned int i, order;
void *res, *p;
- order = get_order_from_bytes(max(align, size));
+ order = get_order_from_bytes_ceil(max(align, size));
res = alloc_xenheap_pages(order, 0);
if ( res == NULL )
@@ -301,7 +301,7 @@ static void dump_console_ring_key(unsigned char key)
/* create a buffer in which we'll copy the ring in the correct
order and NUL terminate */
- order = get_order_from_bytes(conring_size + 1);
+ order = get_order_from_bytes_ceil(conring_size + 1);
buf = alloc_xenheap_pages(order, 0);
if ( buf == NULL )
{
@@ -759,7 +759,7 @@ void __init console_init_ring(void)
if ( !opt_conring_size )
return;
- order = get_order_from_bytes(max(opt_conring_size, conring_size));
+ order = get_order_from_bytes_ceil(max(opt_conring_size, conring_size));
memflags = MEMF_bits(crashinfo_maxaddr_bits);
while ( (ring = alloc_xenheap_pages(order, memflags)) == NULL )
{
@@ -1080,7 +1080,7 @@ static int __init debugtrace_init(void)
if ( bytes == 0 )
return 0;
- order = get_order_from_bytes(bytes);
+ order = get_order_from_bytes_ceil(bytes);
debugtrace_buf = alloc_xenheap_pages(order, 0);
ASSERT(debugtrace_buf != NULL);
@@ -577,7 +577,7 @@ void __init serial_async_transmit(struct serial_port *port)
while ( serial_txbufsz & (serial_txbufsz - 1) )
serial_txbufsz &= serial_txbufsz - 1;
port->txbuf = alloc_xenheap_pages(
- get_order_from_bytes(serial_txbufsz), 0);
+ get_order_from_bytes_ceil(serial_txbufsz), 0);
}
/*
@@ -136,7 +136,8 @@ static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
+ power_of2_entries =
+ get_order_from_bytes_ceil(iommu->cmd_buffer.alloc_size) +
IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -164,7 +165,7 @@ static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes_ceil(iommu->event_log.alloc_size) +
IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -192,7 +193,7 @@ static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu)
iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT);
writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET);
- power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) +
+ power_of2_entries = get_order_from_bytes_ceil(iommu->ppr_log.alloc_size) +
IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE;
entry = 0;
@@ -918,7 +919,7 @@ static void __init deallocate_buffer(void *buf, uint32_t sz)
int order = 0;
if ( buf )
{
- order = get_order_from_bytes(sz);
+ order = get_order_from_bytes_ceil(sz);
__free_amd_iommu_tables(buf, order);
}
}
@@ -940,7 +941,7 @@ static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
{
void * buffer;
- int order = get_order_from_bytes(alloc_size);
+ int order = get_order_from_bytes_ceil(alloc_size);
buffer = __alloc_amd_iommu_tables(order);
@@ -963,8 +964,8 @@ static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
spin_lock_init(&ring_buf->lock);
- ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
- entry_size);
+ ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes_ceil(entries *
+ entry_size);
ring_buf->entries = ring_buf->alloc_size / entry_size;
ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
return ring_buf->buffer;
@@ -1163,7 +1164,7 @@ static int __init amd_iommu_setup_device_table(
/* allocate 'device table' on a 4K boundary */
device_table.alloc_size = PAGE_SIZE <<
- get_order_from_bytes(
+ get_order_from_bytes_ceil(
PAGE_ALIGN(ivrs_bdf_entries *
IOMMU_DEV_TABLE_ENTRY_SIZE));
device_table.entries = device_table.alloc_size /
@@ -460,7 +460,7 @@ int __init pci_ro_device(int seg, int bus, int devfn)
{
size_t sz = BITS_TO_LONGS(PCI_BDF(-1, -1, -1) + 1) * sizeof(long);
- pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes(sz), 0);
+ pseg->ro_map = alloc_xenheap_pages(get_order_from_bytes_ceil(sz), 0);
if ( !pseg->ro_map )
return -ENOMEM;
memset(pseg->ro_map, 0, sz);
@@ -125,7 +125,7 @@ static inline int invalidate_dcache_va_range(const void *p,
static inline int clean_and_invalidate_dcache_va_range(const void *p,
unsigned long size)
{
- unsigned int order = get_order_from_bytes(size);
+ unsigned int order = get_order_from_bytes_ceil(size);
/* sub-page granularity support needs to be added if necessary */
flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
return 0;
@@ -519,7 +519,7 @@ page_list_splice(struct page_list_head *list, struct page_list_head *head)
list_for_each_entry_safe_reverse(pos, tmp, head, list)
#endif
-static inline unsigned int get_order_from_bytes(paddr_t size)
+static inline unsigned int get_order_from_bytes_ceil(paddr_t size)
{
unsigned int order;
@@ -530,6 +530,16 @@ static inline unsigned int get_order_from_bytes(paddr_t size)
return order;
}
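+/*
+ * get_order_from_bytes_ceil returns the order greater than or equal to
+ * the given size; this variant returns the order less than or equal to
+ * it instead. For example, with 4KiB pages:
+ *
+ *   get_order_from_bytes_ceil(3 * PAGE_SIZE)  == 2
+ *   get_order_from_bytes_floor(3 * PAGE_SIZE) == 1
+ */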
+static inline unsigned int get_order_from_bytes_floor(paddr_t size)
+{
+ unsigned int order;
+
+ size >>= PAGE_SHIFT + 1;
+ for ( order = 0; size; order++ )
+ size >>= 1;
+
+ return order;
+}
+
static inline unsigned int get_order_from_pages(unsigned long nr_pages)
{
unsigned int order;
... and introduce a floor variant.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 xen/arch/arm/domain.c                    |  2 +-
 xen/arch/arm/domain_build.c              | 16 +++++-----------
 xen/arch/arm/kernel.c                    |  4 ++--
 xen/arch/arm/percpu.c                    |  3 ++-
 xen/arch/x86/domain.c                    |  2 +-
 xen/arch/x86/domain_build.c              |  4 ++--
 xen/arch/x86/hvm/svm/nestedsvm.c         |  8 ++++----
 xen/arch/x86/hvm/svm/vmcb.c              |  5 +++--
 xen/arch/x86/percpu.c                    |  3 ++-
 xen/arch/x86/smpboot.c                   |  4 ++--
 xen/common/kexec.c                       |  2 +-
 xen/common/page_alloc.c                  |  2 +-
 xen/common/tmem_xen.c                    |  2 +-
 xen/common/xmalloc_tlsf.c                |  6 +++---
 xen/drivers/char/console.c               |  6 +++---
 xen/drivers/char/serial.c                |  2 +-
 xen/drivers/passthrough/amd/iommu_init.c | 17 +++++++++--------
 xen/drivers/passthrough/pci.c            |  2 +-
 xen/include/asm-x86/flushtlb.h           |  2 +-
 xen/include/xen/mm.h                     | 12 +++++++++++-
 20 files changed, 56 insertions(+), 48 deletions(-)