@@ -1158,6 +1158,8 @@ static int transfer_pages_to_heap(struct mem_hotadd_info *info)
static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
{
unsigned long s, e, length, sidx, eidx;
+ paddr_t mem_base = pfn_to_paddr(spfn);
+ unsigned long mem_npages = epfn - spfn;

if ( (spfn >= epfn) )
return 0;
@@ -1168,7 +1170,7 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
if ( (spfn | epfn) & ((1UL << PAGETABLE_ORDER) - 1) )
return 0;
- if ( (spfn | epfn) & pfn_hole_mask )
+ if ( !pdx_is_region_compressible(mem_base, mem_npages) )
return 0;
/* Make sure the new range is not present now */
@@ -1207,7 +1209,7 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
length += (e - s) * sizeof(struct page_info);
- if ((length >> PAGE_SHIFT) > (epfn - spfn))
+ if ((length >> PAGE_SHIFT) > mem_npages)
return 0;
return 1;
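One thing a region-wide check such as the new pdx_is_region_compressible() can reject, while the old (spfn | epfn) & pfn_hole_mask test cannot, is a hot-add range whose endpoints are clear of the PFN hole but which straddles it. A minimal standalone sketch of that situation, not part of the patch and with all values made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t pfn_hole_mask = 1ULL << 16;   /* toy hole mask */
    const uint64_t spfn = 0, epfn = 1ULL << 17;  /* hot-add range [0, 0x20000) */
    const uint64_t interior = 1ULL << 16;        /* a PFN inside that range */

    /* Endpoint-only test, as in the removed line above: passes. */
    printf("endpoints clear of the hole: %d\n",
           !((spfn | epfn) & pfn_hole_mask));

    /* Yet a PFN strictly inside the range still lands in the hole. */
    printf("interior PFN in the hole:    %d\n",
           !!(interior & pfn_hole_mask));

    return 0;
}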
@@ -14,6 +14,7 @@
#include <xen/multiboot.h>
#include <xen/param.h>
#include <xen/pci_regs.h>
+#include <xen/pdx.h>
#include <xen/pfn.h>
#if EFI_PAGE_SIZE != PAGE_SIZE
# error Cannot use xen/pfn.h here!
@@ -1645,9 +1646,11 @@ static __init void copy_mapping(unsigned long mfn, unsigned long end,
static bool __init cf_check ram_range_valid(unsigned long smfn, unsigned long emfn)
{
+ paddr_t ram_base = pfn_to_paddr(smfn);
+ unsigned long ram_npages = emfn - smfn;
unsigned long sz = pfn_to_pdx(emfn - 1) / PDX_GROUP_COUNT + 1;
- return !(smfn & pfn_hole_mask) &&
+ return pdx_is_region_compressible(ram_base, ram_npages) &&
find_next_bit(pdx_group_valid, sz,
pfn_to_pdx(smfn) / PDX_GROUP_COUNT) < sz;
}
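For reference, a toy model of the bitmap half of ram_range_valid() above, not part of the patch: the range is accepted only if some pdx group covering it is marked in pdx_group_valid. find_next_bit() (first set bit at or after the start, or the size if none) is stood in for by a plain loop, and PDX_GROUP_COUNT is an arbitrary example value.

#include <stdint.h>
#include <stdio.h>

#define PDX_GROUP_COUNT 64                      /* toy value for the example */

/* Index of the first set bit in [start, size), or size if none is set. */
static unsigned int next_bit(uint64_t bitmap, unsigned int size,
                             unsigned int start)
{
    for ( unsigned int i = start; i < size; i++ )
        if ( bitmap & (1ULL << i) )
            return i;

    return size;
}

int main(void)
{
    uint64_t pdx_group_valid = 1ULL << 3;       /* only group 3 was set up */
    unsigned long spdx = 2 * PDX_GROUP_COUNT;   /* example pdx range */
    unsigned long epdx = 5 * PDX_GROUP_COUNT;

    /* Mirrors the "sz" and "find_next_bit() < sz" shape in the hunk above. */
    unsigned int sz = (epdx - 1) / PDX_GROUP_COUNT + 1;
    unsigned int hit = next_bit(pdx_group_valid, sz, spdx / PDX_GROUP_COUNT);

    printf("range overlaps a valid pdx group: %d\n", hit < sz);
    return 0;
}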
@@ -1681,6 +1684,8 @@ void __init efi_init_memory(void)
u64 len = desc->NumberOfPages << EFI_PAGE_SHIFT;
unsigned long smfn, emfn;
unsigned int prot = PAGE_HYPERVISOR_RWX;
+ paddr_t mem_base;
+ unsigned long mem_npages;
printk(XENLOG_INFO " %013" PRIx64 "-%013" PRIx64
" type=%u attr=%016" PRIx64 "\n",
@@ -1732,6 +1737,9 @@ void __init efi_init_memory(void)
smfn = PFN_DOWN(desc->PhysicalStart);
emfn = PFN_UP(desc->PhysicalStart + len);
+ mem_base = pfn_to_paddr(smfn);
+ mem_npages = emfn - smfn;
+
if ( desc->Attribute & EFI_MEMORY_WB )
prot |= _PAGE_WB;
else if ( desc->Attribute & EFI_MEMORY_WT )
@@ -1759,8 +1767,7 @@ void __init efi_init_memory(void)
prot |= _PAGE_NX;
if ( pfn_to_pdx(emfn - 1) < (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
- !(smfn & pfn_hole_mask) &&
- !((smfn ^ (emfn - 1)) & ~pfn_pdx_bottom_mask) )
+ pdx_is_region_compressible(mem_base, mem_npages) )
{
if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
prot &= ~_PAGE_GLOBAL;
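As a side note on the smfn/emfn arithmetic that feeds mem_base and mem_npages above, a self-contained sketch of the page rounding, not part of the patch: PFN_DOWN/PFN_UP are re-derived locally for the example and the descriptor values are made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (UINT64_C(1) << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
    uint64_t PhysicalStart = UINT64_C(0x100000); /* example descriptor start */
    uint64_t len = 5 * PAGE_SIZE + 1;            /* 5 pages and one byte */

    uint64_t smfn = PFN_DOWN(PhysicalStart);     /* round start down */
    uint64_t emfn = PFN_UP(PhysicalStart + len); /* round end up */

    /* mem_npages mirrors the "emfn - smfn" computation in the hunks above. */
    printf("smfn=%#" PRIx64 " emfn=%#" PRIx64 " npages=%" PRIu64 "\n",
           smfn, emfn, emfn - smfn);
    return 0;
}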
@@ -88,7 +88,7 @@ bool __mfn_valid(unsigned long mfn)
}
/* Sets all bits from the most-significant 1-bit down to the LSB */
-static uint64_t __init fill_mask(uint64_t mask)
+static uint64_t fill_mask(uint64_t mask)
{
while (mask & (mask + 1))
mask |= mask + 1;
@@ -96,6 +96,12 @@ static uint64_t __init fill_mask(uint64_t mask)
return mask;
}

+bool pdx_is_region_compressible(paddr_t base, unsigned long npages)
+{
+ return !(paddr_to_pfn(base) & pfn_hole_mask) &&
+ !(pdx_region_mask(base, npages * PAGE_SIZE) & ~ma_va_bottom_mask);
+}
+
/* We don't want to compress the low MAX_ORDER bits of the addresses. */
uint64_t __init pdx_init_mask(uint64_t base_addr)
{
{
@@ -103,7 +109,7 @@ uint64_t __init pdx_init_mask(uint64_t base_addr)
(uint64_t)1 << (MAX_ORDER + PAGE_SHIFT)) - 1);
}
-uint64_t __init pdx_region_mask(uint64_t base, uint64_t len)
+uint64_t pdx_region_mask(uint64_t base, uint64_t len)
{
/*
* We say a bit "moves" in a range if there exist 2 addresses in that
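To make the mask arithmetic concrete, a standalone sketch, not part of the patch, pairing the fill_mask() shown above with a local stand-in for pdx_region_mask(), assumed here from its "moving bits" description to reduce to fill_mask(base ^ (base + len - 1)); the region below is an arbitrary example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sets all bits from the most-significant 1-bit down to the LSB. */
static uint64_t fill_mask(uint64_t mask)
{
    while (mask & (mask + 1))
        mask |= mask + 1;

    return mask;
}

/*
 * Stand-in for pdx_region_mask(): the bits that can differ between any
 * two addresses inside [base, base + len).
 */
static uint64_t region_mask(uint64_t base, uint64_t len)
{
    return fill_mask(base ^ (base + len - 1));
}

int main(void)
{
    uint64_t base = UINT64_C(16) << 30;   /* 16 GiB, example value */
    uint64_t len  = UINT64_C(1) << 30;    /* 1 GiB region */

    printf("fill_mask(0x40000)   = %#" PRIx64 "\n", fill_mask(0x40000));
    printf("region_mask(16G, 1G) = %#" PRIx64 "\n", region_mask(base, len));
    return 0;
}

The new pdx_is_region_compressible() above then accepts the region only if this mask stays within ma_va_bottom_mask, i.e. none of the address bits removed by the compression vary inside the region, in addition to the base PFN being clear of pfn_hole_mask.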
@@ -79,6 +79,15 @@ extern unsigned long pfn_top_mask, ma_top_mask;
(sizeof(*frame_table) & -sizeof(*frame_table)))
extern unsigned long pdx_group_valid[];
+/**
+ * Validate a region's compatibility with the current compression runtime
+ *
+ * @param base Base address of the region
+ * @param npages Number of PAGE_SIZE-sized pages in the region
+ * @return True iff the region can be used with the current compression
+ */
+bool pdx_is_region_compressible(paddr_t base, unsigned long npages);
+
/**
* Calculates a mask covering "moving" bits of all addresses of a region
*