@@ -5903,7 +5903,7 @@ int modify_xen_mappings(unsigned long s,
{
l3_pgentry_t *pl3e = virt_to_xen_l3e(v);
- if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+ if ( !pl3e || !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
{
/* Confirm the caller isn't trying to create new mappings. */
ASSERT(!(nf & _PAGE_PRESENT));
@@ -5931,6 +5931,8 @@ int modify_xen_mappings(unsigned long s,
/* PAGE1GB: shatter the superpage and fall through. */
pl2e = alloc_xen_pagetable();
+ if ( !pl2e )
+ return -ENOMEM;
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
l2e_write(pl2e + i,
l2e_from_pfn(l3e_get_pfn(*pl3e) +
@@ -5951,7 +5953,11 @@ int modify_xen_mappings(unsigned long s,
free_xen_pagetable(pl2e);
}
- pl2e = virt_to_xen_l2e(v);
+ /*
+ * The L3 entry has been verified to be present, and we've dealt with
+ * 1G pages as well, so the L2 table cannot require allocation.
+ */
+ pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
{
@@ -5980,6 +5986,8 @@ int modify_xen_mappings(unsigned long s,
{
/* PSE: shatter the superpage and try again. */
pl1e = alloc_xen_pagetable();
+ if ( !pl1e )
+ return -ENOMEM;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
l1e_write(&pl1e[i],
l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
@@ -6003,7 +6011,11 @@ int modify_xen_mappings(unsigned long s,
{
l1_pgentry_t nl1e;
- /* Ordinary 4kB mapping. */
+ /*
+ * Ordinary 4kB mapping: The L2 entry has been verified to be
+ * present, and we've dealt with 2M pages as well, so the L1 table
+ * cannot require allocation.
+ */
pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
/* Confirm the caller isn't trying to create new mappings. */
Reported-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Comment the pl2e related ASSERT().