@@ -16,6 +16,8 @@
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <grub/i386/memory.h>
+#include <grub/i386/types.h>
#include <grub/symbol.h>
#include <grub/xen.h>
@@ -23,78 +25,86 @@
VARIABLE(grub_relocator_xen_remap_start)
LOCAL(base):
- /* mov imm32, %ebx */
+ /* Remap the remapper to its new address. */
+ /* mov imm32, %ebx - %ebx: new virtual address of remapper */
.byte 0xbb
VARIABLE(grub_relocator_xen_remapper_virt)
.long 0
- /* mov imm32, %ecx */
+ /* mov imm32, %ecx - %ecx: low part of page table entry */
.byte 0xb9
VARIABLE(grub_relocator_xen_remapper_map)
.long 0
- /* mov imm32, %edx */
+ /* mov imm32, %edx - %edx: high part of page table entry */
.byte 0xba
VARIABLE(grub_relocator_xen_remapper_map_high)
.long 0
- movl %ebx, %ebp
+ movl %ebx, %ebp /* %ebx is clobbered by hypercall */
- movl $2, %esi
+ movl $UVMF_INVLPG, %esi /* %esi: flags (inv. single entry) */
movl $__HYPERVISOR_update_va_mapping, %eax
int $0x82
movl %ebp, %ebx
addl $(LOCAL(cont) - LOCAL(base)), %ebx
- jmp *%ebx
+ jmp *%ebx /* Continue with new virtual address */
LOCAL(cont):
- xorl %eax, %eax
- movl %eax, %ebp
+ /* Modify mappings of new page tables to be read-only. */
+ /* mov imm32, %eax */
+ .byte 0xb8
+VARIABLE(grub_relocator_xen_paging_areas_addr)
+ .long 0
+ movl %eax, %ebx
1:
+ movl 0(%ebx), %ebp /* Get start pfn of the current area */
+ movl GRUB_TARGET_SIZEOF_LONG(%ebx), %ecx /* Get # of pg tables */
+ testl %ecx, %ecx /* 0 -> last area reached */
+ jz 3f
+ addl $(2 * GRUB_TARGET_SIZEOF_LONG), %ebx
+ movl %ebx, %esp /* Save current area pointer */
+2:
+ movl %ecx, %edi
/* mov imm32, %eax */
.byte 0xb8
VARIABLE(grub_relocator_xen_mfn_list)
.long 0
- movl %eax, %edi
- movl %ebp, %eax
- movl 0(%edi, %eax, 4), %ecx
-
- /* mov imm32, %ebx */
- .byte 0xbb
-VARIABLE(grub_relocator_xen_paging_start)
- .long 0
- shll $12, %eax
- addl %eax, %ebx
+ movl 0(%eax, %ebp, 4), %ecx /* mfn */
+ movl %ebp, %ebx
+ shll $PAGE_SHIFT, %ebx /* virtual address (1:1 mapping) */
movl %ecx, %edx
- shll $12, %ecx
- shrl $20, %edx
- orl $5, %ecx
- movl $2, %esi
+ shll $PAGE_SHIFT, %ecx /* prepare pte low part */
+ shrl $(32 - PAGE_SHIFT), %edx /* pte high part */
+ orl $(GRUB_PAGE_PRESENT | GRUB_PAGE_USER), %ecx /* pte low */
+ movl $UVMF_INVLPG, %esi
movl $__HYPERVISOR_update_va_mapping, %eax
- int $0x82
+ int $0x82 /* parameters: eax, ebx, ecx, edx, esi */
- incl %ebp
- /* mov imm32, %ecx */
- .byte 0xb9
-VARIABLE(grub_relocator_xen_paging_size)
- .long 0
- cmpl %ebp, %ecx
+ incl %ebp /* next pfn */
+ movl %edi, %ecx
- ja 1b
+ loop 2b
+ mov %esp, %ebx /* restore area pointer */
+ jmp 1b
+
+3:
+ /* Switch page tables: pin new L3 pt, load cr3, unpin old L3. */
/* mov imm32, %ebx */
.byte 0xbb
VARIABLE(grub_relocator_xen_mmu_op_addr)
.long 0
- movl $3, %ecx
- movl $0, %edx
- movl $0x7FF0, %esi
+ movl $3, %ecx /* 3 mmu ops */
+ movl $0, %edx /* pdone (not used) */
+ movl $DOMID_SELF, %esi
movl $__HYPERVISOR_mmuext_op, %eax
int $0x82
+ /* Continue in virtual kernel mapping. */
/* mov imm32, %eax */
.byte 0xb8
VARIABLE(grub_relocator_xen_remap_continue)
@@ -102,6 +112,9 @@ VARIABLE(grub_relocator_xen_remap_continue)
jmp *%eax
+VARIABLE(grub_relocator_xen_paging_areas)
+ .long 0, 0, 0, 0, 0, 0, 0, 0
+
VARIABLE(grub_relocator_xen_mmu_op)
.space 256
@@ -109,6 +122,7 @@ VARIABLE(grub_relocator_xen_remap_end)
VARIABLE(grub_relocator_xen_start)
+ /* Unmap old remapper area. */
/* mov imm32, %eax */
.byte 0xb8
VARIABLE(grub_relocator_xen_remapper_virt2)
@@ -116,14 +130,14 @@ VARIABLE(grub_relocator_xen_remapper_virt2)
movl %eax, %edi
- xorl %ecx, %ecx
+ xorl %ecx, %ecx /* Invalid pte */
xorl %edx, %edx
- movl $2, %esi
+ movl $UVMF_INVLPG, %esi
movl $__HYPERVISOR_update_va_mapping, %eax
int $0x82
-
+ /* Prepare registers for starting kernel. */
/* mov imm32, %eax */
.byte 0xb8
VARIABLE(grub_relocator_xen_stack)
@@ -145,6 +159,7 @@ VARIABLE(grub_relocator_xen_start_info)
VARIABLE(grub_relocator_xen_entry_point)
.long 0
+ /* Now start the new kernel. */
jmp *%eax
VARIABLE(grub_relocator_xen_end)
@@ -16,95 +16,85 @@
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <grub/x86_64/memory.h>
+#include <grub/x86_64/types.h>
#include <grub/symbol.h>
#include <grub/xen.h>
+/* Macro to load an imm64 value stored by the C-part into %rax: */
+#define MOV_IMM64_RAX(var) .byte 0x48, 0xb8; VARIABLE(var); .quad 0
+
.p2align 4 /* force 16-byte alignment */
VARIABLE(grub_relocator_xen_remap_start)
LOCAL(base):
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_remapper_virt)
- .quad 0
+ /* Remap the remapper to its new address. */
+ MOV_IMM64_RAX(grub_relocator_xen_remapper_virt)
- movq %rax, %rdi
- movq %rax, %rbx
+ movq %rax, %rdi /* %rdi: new virtual address of remapper */
+ movq %rax, %rbx /* Remember new virtual address */
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_remapper_map)
- .quad 0
+ MOV_IMM64_RAX(grub_relocator_xen_remapper_map)
- movq %rax, %rsi
+ movq %rax, %rsi /* %rsi: page table entry */
- movq $2, %rdx
+ movq $UVMF_INVLPG, %rdx /* %rdx: flags (inv. single entry) */
movq $__HYPERVISOR_update_va_mapping, %rax
- syscall
+ syscall /* Do the remap operation */
addq $(LOCAL(cont) - LOCAL(base)), %rbx
- jmp *%rbx
+ jmp *%rbx /* Continue with new virtual address */
LOCAL(cont):
-
- /* mov imm64, %rcx */
- .byte 0x48
- .byte 0xb9
-VARIABLE(grub_relocator_xen_paging_size)
- .quad 0
+ /* Modify mappings of new page tables to be read-only. */
+ MOV_IMM64_RAX(grub_relocator_xen_mfn_list)
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_paging_start)
- .quad 0
+ movq %rax, %rbx /* %rbx is the base of the p2m list */
+ leaq EXT_C(grub_relocator_xen_paging_areas) (%rip), %r8
- movq %rax, %r12
-
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_mfn_list)
- .quad 0
-
- movq %rax, %rsi
1:
+ movq 0(%r8), %r12 /* Get start pfn of the current area */
+ movq GRUB_TARGET_SIZEOF_LONG(%r8), %rcx /* Get # of pg tables */
+ testq %rcx, %rcx /* 0 -> last area reached */
+ jz 3f
+2:
movq %r12, %rdi
- movq %rsi, %rbx
- movq 0(%rsi), %rsi
- shlq $12, %rsi
- orq $5, %rsi
- movq $2, %rdx
- movq %rcx, %r9
+ shlq $PAGE_SHIFT, %rdi /* virtual address (1:1 mapping) */
+ movq (%rbx, %r12, 8), %rsi /* mfn */
+ shlq $PAGE_SHIFT, %rsi
+ orq $(GRUB_PAGE_PRESENT | GRUB_PAGE_USER), %rsi /* Build pte */
+ movq $UVMF_INVLPG, %rdx
+ movq %rcx, %r9 /* %rcx clobbered by hypercall */
movq $__HYPERVISOR_update_va_mapping, %rax
syscall
movq %r9, %rcx
- addq $8, %rbx
- addq $4096, %r12
- movq %rbx, %rsi
+ incq %r12 /* next pfn */
- loop 1b
+ loop 2b
- leaq LOCAL(mmu_op) (%rip), %rdi
- movq $3, %rsi
- movq $0, %rdx
- movq $0x7FF0, %r10
+ addq $(2 * GRUB_TARGET_SIZEOF_LONG), %r8 /* next pg table area */
+ jmp 1b
+
+3:
+ /* Switch page tables: pin new L4 pt, load cr3, unpin old L4. */
+ leaq EXT_C(grub_relocator_xen_mmu_op) (%rip), %rdi
+ movq $3, %rsi /* 3 mmu ops */
+ movq $0, %rdx /* pdone (not used) */
+ movq $DOMID_SELF, %r10
movq $__HYPERVISOR_mmuext_op, %rax
syscall
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_remap_continue)
- .quad 0
+ /* Continue in virtual kernel mapping. */
+ MOV_IMM64_RAX(grub_relocator_xen_remap_continue)
jmp *%rax
-LOCAL(mmu_op):
+VARIABLE(grub_relocator_xen_paging_areas)
+ /* array of start, size pairs, size 0 is end marker */
+ .quad 0, 0, 0, 0, 0, 0, 0, 0
+
VARIABLE(grub_relocator_xen_mmu_op)
.space 256
@@ -112,46 +102,32 @@ VARIABLE(grub_relocator_xen_remap_end)
VARIABLE(grub_relocator_xen_start)
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_remapper_virt2)
- .quad 0
+ /* Unmap old remapper area. */
+ MOV_IMM64_RAX(grub_relocator_xen_remapper_virt2)
movq %rax, %rdi
- xorq %rax, %rax
+ xorq %rax, %rax /* Invalid pte */
movq %rax, %rsi
- movq $2, %rdx
+ movq $UVMF_INVLPG, %rdx
movq $__HYPERVISOR_update_va_mapping, %rax
syscall
-
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_stack)
- .quad 0
+ /* Prepare registers for starting kernel. */
+ MOV_IMM64_RAX(grub_relocator_xen_stack)
movq %rax, %rsp
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_start_info)
- .quad 0
+ MOV_IMM64_RAX(grub_relocator_xen_start_info)
movq %rax, %rsi
cld
- /* mov imm64, %rax */
- .byte 0x48
- .byte 0xb8
-VARIABLE(grub_relocator_xen_entry_point)
- .quad 0
+ MOV_IMM64_RAX(grub_relocator_xen_entry_point)
+ /* Now start the new kernel. */
jmp *%rax
VARIABLE(grub_relocator_xen_end)
@@ -36,15 +36,18 @@ extern grub_uint8_t grub_relocator_xen_remap_end;
extern grub_xen_reg_t grub_relocator_xen_stack;
extern grub_xen_reg_t grub_relocator_xen_start_info;
extern grub_xen_reg_t grub_relocator_xen_entry_point;
-extern grub_xen_reg_t grub_relocator_xen_paging_start;
-extern grub_xen_reg_t grub_relocator_xen_paging_size;
extern grub_xen_reg_t grub_relocator_xen_remapper_virt;
extern grub_xen_reg_t grub_relocator_xen_remapper_virt2;
extern grub_xen_reg_t grub_relocator_xen_remapper_map;
extern grub_xen_reg_t grub_relocator_xen_mfn_list;
+extern struct {
+ grub_xen_reg_t start;
+ grub_xen_reg_t size;
+} grub_relocator_xen_paging_areas[XEN_MAX_MAPPINGS];
extern grub_xen_reg_t grub_relocator_xen_remap_continue;
#ifdef __i386__
extern grub_xen_reg_t grub_relocator_xen_mmu_op_addr;
+extern grub_xen_reg_t grub_relocator_xen_paging_areas_addr;
extern grub_xen_reg_t grub_relocator_xen_remapper_map_high;
#endif
extern mmuext_op_t grub_relocator_xen_mmu_op[3];
@@ -61,6 +64,7 @@ grub_relocator_xen_boot (struct grub_relocator *rel,
{
grub_err_t err;
void *relst;
+ int i;
grub_relocator_chunk_t ch, ch_tramp;
grub_xen_mfn_t *mfn_list =
(grub_xen_mfn_t *) grub_xen_start_page_addr->mfn_list;
@@ -77,8 +81,11 @@ grub_relocator_xen_boot (struct grub_relocator *rel,
grub_relocator_xen_stack = state.stack;
grub_relocator_xen_start_info = state.start_info;
grub_relocator_xen_entry_point = state.entry_point;
- grub_relocator_xen_paging_start = state.paging_start << 12;
- grub_relocator_xen_paging_size = state.paging_size;
+ for (i = 0; i < XEN_MAX_MAPPINGS; i++)
+ {
+ grub_relocator_xen_paging_areas[i].start = state.paging_start[i];
+ grub_relocator_xen_paging_areas[i].size = state.paging_size[i];
+ }
grub_relocator_xen_remapper_virt = remapper_virt;
grub_relocator_xen_remapper_virt2 = remapper_virt;
grub_relocator_xen_remap_continue = trampoline_virt;
@@ -88,10 +95,12 @@ grub_relocator_xen_boot (struct grub_relocator *rel,
grub_relocator_xen_remapper_map_high = (mfn_list[remapper_pfn] >> 20);
grub_relocator_xen_mmu_op_addr = (char *) &grub_relocator_xen_mmu_op
- (char *) &grub_relocator_xen_remap_start + remapper_virt;
+ grub_relocator_xen_paging_areas_addr =
+ (char *) &grub_relocator_xen_paging_areas
+ - (char *) &grub_relocator_xen_remap_start + remapper_virt;
#endif
- grub_relocator_xen_mfn_list = state.mfn_list
- + state.paging_start * sizeof (grub_addr_t);
+ grub_relocator_xen_mfn_list = state.mfn_list;
grub_memset (grub_relocator_xen_mmu_op, 0,
sizeof (grub_relocator_xen_mmu_op));
@@ -100,9 +109,9 @@ grub_relocator_xen_boot (struct grub_relocator *rel,
#else
grub_relocator_xen_mmu_op[0].cmd = MMUEXT_PIN_L4_TABLE;
#endif
- grub_relocator_xen_mmu_op[0].arg1.mfn = mfn_list[state.paging_start];
+ grub_relocator_xen_mmu_op[0].arg1.mfn = mfn_list[state.paging_start[0]];
grub_relocator_xen_mmu_op[1].cmd = MMUEXT_NEW_BASEPTR;
- grub_relocator_xen_mmu_op[1].arg1.mfn = mfn_list[state.paging_start];
+ grub_relocator_xen_mmu_op[1].arg1.mfn = mfn_list[state.paging_start[0]];
grub_relocator_xen_mmu_op[2].cmd = MMUEXT_UNPIN_TABLE;
grub_relocator_xen_mmu_op[2].arg1.mfn =
mfn_list[grub_xen_start_page_addr->pt_base >> 12];
@@ -39,9 +39,34 @@
#include <grub/xen.h>
#include <grub/xen_file.h>
#include <grub/linux.h>
+#include <grub/i386/memory.h>
GRUB_MOD_LICENSE ("GPLv3+");
+#ifdef __x86_64__
+#define NUMBER_OF_LEVELS 4
+#define INTERMEDIATE_OR (GRUB_PAGE_PRESENT | GRUB_PAGE_RW | GRUB_PAGE_USER)
+#define VIRT_MASK 0x0000ffffffffffffULL
+#else
+#define NUMBER_OF_LEVELS 3
+#define INTERMEDIATE_OR (GRUB_PAGE_PRESENT | GRUB_PAGE_RW)
+#define VIRT_MASK 0x00000000ffffffffULL
+#define HYPERVISOR_PUD_ADDRESS 0xc0000000ULL
+#endif
+
+struct grub_xen_mapping_lvl {
+ grub_uint64_t virt_start;
+ grub_uint64_t virt_end;
+ grub_uint64_t pfn_start;
+ grub_uint64_t n_pt_pages;
+};
+
+struct grub_xen_mapping {
+ grub_uint64_t *where;
+ struct grub_xen_mapping_lvl area;
+ struct grub_xen_mapping_lvl lvls[NUMBER_OF_LEVELS];
+};
+
struct xen_loader_state {
struct grub_relocator *relocator;
struct grub_relocator_xen_state state;
@@ -57,6 +82,9 @@ struct xen_loader_state {
struct xen_multiboot_mod_list *module_info_page;
grub_uint64_t modules_target_start;
grub_size_t n_modules;
+ struct grub_xen_mapping *map_reloc;
+ struct grub_xen_mapping mappings[XEN_MAX_MAPPINGS];
+ int n_mappings;
int loaded;
int alloc_end_called;
};
@@ -65,9 +93,8 @@ static struct xen_loader_state xen_state;
static grub_dl_t my_mod;
-#define PAGE_SIZE 4096
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define MAX_MODULES (PAGE_SIZE / sizeof (struct xen_multiboot_mod_list))
-#define PAGE_SHIFT 12
#define STACK_SIZE 1048576
#define ADDITIONAL_SIZE (1 << 19)
#define ALIGN_SIZE (1 << 22)
@@ -80,100 +107,163 @@ page2offset (grub_uint64_t page)
return page << PAGE_SHIFT;
}
-#ifdef __x86_64__
-#define NUMBER_OF_LEVELS 4
-#define INTERMEDIATE_OR 7
-#else
-#define NUMBER_OF_LEVELS 3
-#define INTERMEDIATE_OR 3
+static grub_err_t
+get_pgtable_size (grub_uint64_t from, grub_uint64_t to, grub_uint64_t pfn)
+{
+ struct grub_xen_mapping *map, *map_cmp;
+ grub_uint64_t mask, bits;
+ int i, m;
+
+ if (xen_state.n_mappings == XEN_MAX_MAPPINGS)
+ return grub_error (GRUB_ERR_BUG, "too many mapped areas");
+
+ grub_dprintf ("xen", "get_pgtable_size %d from=%llx, to=%llx, pfn=%llx\n",
+ xen_state.n_mappings, (unsigned long long) from,
+ (unsigned long long) to, (unsigned long long) pfn);
+
+ map = xen_state.mappings + xen_state.n_mappings;
+ grub_memset (map, 0, sizeof (*map));
+
+ map->area.virt_start = from & VIRT_MASK;
+ map->area.virt_end = (to - 1) & VIRT_MASK;
+ map->area.n_pt_pages = 0;
+
+ for (i = NUMBER_OF_LEVELS - 1; i >= 0; i--)
+ {
+ map->lvls[i].pfn_start = pfn + map->area.n_pt_pages;
+ if (i == NUMBER_OF_LEVELS - 1)
+ {
+ if (xen_state.n_mappings == 0)
+ {
+ map->lvls[i].virt_start = 0;
+ map->lvls[i].virt_end = VIRT_MASK;
+ map->lvls[i].n_pt_pages = 1;
+ map->area.n_pt_pages++;
+ }
+ continue;
+ }
+
+ bits = PAGE_SHIFT + (i + 1) * LOG_POINTERS_PER_PAGE;
+ mask = (1ULL << bits) - 1;
+ map->lvls[i].virt_start = map->area.virt_start & ~mask;
+ map->lvls[i].virt_end = map->area.virt_end | mask;
+#ifdef __i386__
+ /* PAE wants last root directory present. */
+ if (i == 1 && to <= HYPERVISOR_PUD_ADDRESS && xen_state.n_mappings == 0)
+ map->lvls[i].virt_end = VIRT_MASK;
#endif
+ for (m = 0; m < xen_state.n_mappings; m++)
+ {
+ map_cmp = xen_state.mappings + m;
+ if (map_cmp->lvls[i].virt_start == map_cmp->lvls[i].virt_end)
+ continue;
+ if (map->lvls[i].virt_start >= map_cmp->lvls[i].virt_start &&
+ map->lvls[i].virt_end <= map_cmp->lvls[i].virt_end)
+ {
+ map->lvls[i].virt_start = 0;
+ map->lvls[i].virt_end = 0;
+ break;
+ }
+ if (map->lvls[i].virt_start >= map_cmp->lvls[i].virt_start &&
+ map->lvls[i].virt_start <= map_cmp->lvls[i].virt_end)
+ map->lvls[i].virt_start = map_cmp->lvls[i].virt_end + 1;
+ if (map->lvls[i].virt_end >= map_cmp->lvls[i].virt_start &&
+ map->lvls[i].virt_end <= map_cmp->lvls[i].virt_end)
+ map->lvls[i].virt_end = map_cmp->lvls[i].virt_start - 1;
+ }
+ if (map->lvls[i].virt_start < map->lvls[i].virt_end)
+ map->lvls[i].n_pt_pages =
+ ((map->lvls[i].virt_end - map->lvls[i].virt_start) >> bits) + 1;
+ map->area.n_pt_pages += map->lvls[i].n_pt_pages;
+ grub_dprintf ("xen", "get_pgtable_size level %d: virt %llx-%llx %d pts\n",
+ i, (unsigned long long) map->lvls[i].virt_start,
+ (unsigned long long) map->lvls[i].virt_end,
+ (int) map->lvls[i].n_pt_pages);
+ }
+
+ grub_dprintf ("xen", "get_pgtable_size return: %d page tables\n",
+ (int) map->area.n_pt_pages);
+
+ xen_state.state.paging_start[xen_state.n_mappings] = pfn;
+ xen_state.state.paging_size[xen_state.n_mappings] = map->area.n_pt_pages;
+
+ return GRUB_ERR_NONE;
+}
+
+static grub_uint64_t *
+get_pg_table_virt (int mapping, int level)
+{
+ grub_uint64_t pfn;
+ struct grub_xen_mapping *map;
+
+ map = xen_state.mappings + mapping;
+ pfn = map->lvls[level].pfn_start - map->lvls[NUMBER_OF_LEVELS - 1].pfn_start;
+ return map->where + pfn * POINTERS_PER_PAGE;
+}
static grub_uint64_t
-get_pgtable_size (grub_uint64_t total_pages, grub_uint64_t virt_base)
+get_pg_table_prot (int level, grub_uint64_t pfn)
{
- if (!virt_base)
- total_pages++;
- grub_uint64_t ret = 0;
- grub_uint64_t ll = total_pages;
- int i;
- for (i = 0; i < NUMBER_OF_LEVELS; i++)
+ int m;
+ grub_uint64_t pfn_s, pfn_e;
+
+ if (level > 0)
+ return INTERMEDIATE_OR;
+ for (m = 0; m < xen_state.n_mappings; m++)
{
- ll = (ll + POINTERS_PER_PAGE - 1) >> LOG_POINTERS_PER_PAGE;
- /* PAE wants all 4 root directories present. */
-#ifdef __i386__
- if (i == 1)
- ll = 4;
-#endif
- ret += ll;
+ pfn_s = xen_state.mappings[m].lvls[NUMBER_OF_LEVELS - 1].pfn_start;
+ pfn_e = xen_state.mappings[m].area.n_pt_pages + pfn_s;
+ if (pfn >= pfn_s && pfn < pfn_e)
+ return GRUB_PAGE_PRESENT | GRUB_PAGE_USER;
}
- for (i = 1; i < NUMBER_OF_LEVELS; i++)
- if (virt_base >> (PAGE_SHIFT + i * LOG_POINTERS_PER_PAGE))
- ret++;
- return ret;
+ return GRUB_PAGE_PRESENT | GRUB_PAGE_RW | GRUB_PAGE_USER;
}
static void
-generate_page_table (grub_uint64_t *where, grub_uint64_t paging_start,
- grub_uint64_t paging_end, grub_uint64_t total_pages,
- grub_uint64_t virt_base, grub_xen_mfn_t *mfn_list)
+generate_page_table (grub_xen_mfn_t *mfn_list)
{
- if (!virt_base)
- paging_end++;
+ int l, m1, m2;
+ long p, p_s, p_e;
+ grub_uint64_t start, end, pfn;
+ grub_uint64_t *pg;
+ struct grub_xen_mapping_lvl *lvl;
- grub_uint64_t lx[NUMBER_OF_LEVELS], lxs[NUMBER_OF_LEVELS];
- grub_uint64_t nlx, nls, sz = 0;
- int l;
+ for (m1 = 0; m1 < xen_state.n_mappings; m1++)
+ grub_memset (xen_state.mappings[m1].where, 0,
+ xen_state.mappings[m1].area.n_pt_pages * PAGE_SIZE);
- nlx = paging_end;
- nls = virt_base >> PAGE_SHIFT;
- for (l = 0; l < NUMBER_OF_LEVELS; l++)
+ for (l = NUMBER_OF_LEVELS - 1; l >= 0; l--)
{
- nlx = (nlx + POINTERS_PER_PAGE - 1) >> LOG_POINTERS_PER_PAGE;
- /* PAE wants all 4 root directories present. */
-#ifdef __i386__
- if (l == 1)
- nlx = 4;
-#endif
- lx[l] = nlx;
- sz += lx[l];
- lxs[l] = nls & (POINTERS_PER_PAGE - 1);
- if (nls && l != 0)
- sz++;
- nls >>= LOG_POINTERS_PER_PAGE;
- }
-
- grub_uint64_t lp;
- grub_uint64_t j;
- grub_uint64_t *pg = (grub_uint64_t *) where;
- int pr = 0;
-
- grub_memset (pg, 0, sz * PAGE_SIZE);
-
- lp = paging_start + lx[NUMBER_OF_LEVELS - 1];
- for (l = NUMBER_OF_LEVELS - 1; l >= 1; l--)
- {
- if (lxs[l] || pr)
- pg[0] = page2offset (mfn_list[lp++]) | INTERMEDIATE_OR;
- if (pr)
- pg += POINTERS_PER_PAGE;
- for (j = 0; j < lx[l - 1]; j++)
- pg[j + lxs[l]] = page2offset (mfn_list[lp++]) | INTERMEDIATE_OR;
- pg += lx[l] * POINTERS_PER_PAGE;
- if (lxs[l])
- pr = 1;
- }
-
- if (lxs[0] || pr)
- pg[0] = page2offset (mfn_list[total_pages]) | 5;
- if (pr)
- pg += POINTERS_PER_PAGE;
-
- for (j = 0; j < paging_end; j++)
- {
- if (j >= paging_start && j < lp)
- pg[j + lxs[0]] = page2offset (mfn_list[j]) | 5;
- else
- pg[j + lxs[0]] = page2offset (mfn_list[j]) | 7;
+ for (m1 = 0; m1 < xen_state.n_mappings; m1++)
+ {
+ start = xen_state.mappings[m1].lvls[l].virt_start;
+ end = xen_state.mappings[m1].lvls[l].virt_end;
+ pg = get_pg_table_virt(m1, l);
+ for (m2 = 0; m2 < xen_state.n_mappings; m2++)
+ {
+ lvl = (l > 0) ? xen_state.mappings[m2].lvls + l - 1
+ : &xen_state.mappings[m2].area;
+ if (l > 0 && lvl->n_pt_pages == 0)
+ continue;
+ if (lvl->virt_start >= end || lvl->virt_end <= start)
+ continue;
+ p_s = (grub_max (start, lvl->virt_start) - start) >>
+ (PAGE_SHIFT + l * LOG_POINTERS_PER_PAGE);
+ p_e = (grub_min (end, lvl->virt_end) - start) >>
+ (PAGE_SHIFT + l * LOG_POINTERS_PER_PAGE);
+ pfn = ((grub_max (start, lvl->virt_start) - lvl->virt_start) >>
+ (PAGE_SHIFT + l * LOG_POINTERS_PER_PAGE)) + lvl->pfn_start;
+ grub_dprintf ("xen", "write page table entries level %d pg %p "
+ "mapping %d/%d index %lx-%lx pfn %llx\n",
+ l, pg, m1, m2, p_s, p_e, (unsigned long long) pfn);
+ for (p = p_s; p <= p_e; p++)
+ {
+ pg[p] = page2offset (mfn_list[pfn]) |
+ get_pg_table_prot (l, pfn);
+ pfn++;
+ }
+ }
+ }
}
}
@@ -280,42 +370,68 @@ grub_xen_pt_alloc (void)
grub_relocator_chunk_t ch;
grub_err_t err;
grub_uint64_t nr_info_pages;
- grub_uint64_t nr_pages, nr_pt_pages, nr_need_pages;
+ grub_uint64_t nr_need_pages;
+ grub_uint64_t try_virt_end;
+ struct grub_xen_mapping *map;
+
+ map = xen_state.mappings + xen_state.n_mappings;
+ xen_state.map_reloc = map + 1;
xen_state.next_start.pt_base =
xen_state.max_addr + xen_state.xen_inf.virt_base;
- xen_state.state.paging_start = xen_state.max_addr >> PAGE_SHIFT;
-
nr_info_pages = xen_state.max_addr >> PAGE_SHIFT;
- nr_pages = nr_info_pages;
+ nr_need_pages = nr_info_pages;
while (1)
{
- nr_pages = ALIGN_UP (nr_pages, (ALIGN_SIZE >> PAGE_SHIFT));
- nr_pt_pages = get_pgtable_size (nr_pages, xen_state.xen_inf.virt_base);
- nr_need_pages =
- nr_info_pages + nr_pt_pages +
- ((ADDITIONAL_SIZE + STACK_SIZE) >> PAGE_SHIFT);
- if (nr_pages >= nr_need_pages)
+ try_virt_end = ALIGN_UP (xen_state.xen_inf.virt_base +
+ page2offset (nr_need_pages) +
+ ADDITIONAL_SIZE + STACK_SIZE, ALIGN_SIZE);
+ if (!xen_state.xen_inf.virt_base)
+ try_virt_end += PAGE_SIZE;
+
+ err = get_pgtable_size (xen_state.xen_inf.virt_base, try_virt_end,
+ nr_info_pages);
+ if (err)
+ return err;
+ xen_state.n_mappings++;
+
+ /* Map the relocator page either at virtual 0 or after end of area. */
+ nr_need_pages = nr_info_pages + map->area.n_pt_pages;
+ if (xen_state.xen_inf.virt_base)
+ err = get_pgtable_size (0, PAGE_SIZE, nr_need_pages);
+ else
+ err = get_pgtable_size (try_virt_end - PAGE_SIZE, try_virt_end,
+ nr_need_pages);
+ if (err)
+ return err;
+ nr_need_pages += xen_state.map_reloc->area.n_pt_pages;
+
+ if (xen_state.xen_inf.virt_base + page2offset (nr_need_pages) <=
+ try_virt_end)
break;
- nr_pages = nr_need_pages;
+
+ xen_state.n_mappings--;
}
+ xen_state.n_mappings++;
+ nr_need_pages = map->area.n_pt_pages + xen_state.map_reloc->area.n_pt_pages;
err = grub_relocator_alloc_chunk_addr (xen_state.relocator, &ch,
xen_state.max_addr,
- page2offset (nr_pt_pages));
+ page2offset (nr_need_pages));
if (err)
return err;
- xen_state.virt_pgtable = get_virtual_current_address (ch);
- xen_state.pgtbl_start = xen_state.max_addr >> PAGE_SHIFT;
- xen_state.max_addr += page2offset (nr_pt_pages);
+ map->where = get_virtual_current_address (ch);
+ map->area.pfn_start = 0;
+ xen_state.max_addr += page2offset (nr_need_pages);
xen_state.state.stack =
xen_state.max_addr + STACK_SIZE + xen_state.xen_inf.virt_base;
- xen_state.state.paging_size = nr_pt_pages;
- xen_state.next_start.nr_pt_frames = nr_pt_pages;
- xen_state.max_addr = page2offset (nr_pages);
- xen_state.pgtbl_end = nr_pages;
+ xen_state.next_start.nr_pt_frames = nr_need_pages;
+ xen_state.max_addr = try_virt_end - xen_state.xen_inf.virt_base;
+ xen_state.pgtbl_end = xen_state.max_addr >> PAGE_SHIFT;
+ xen_state.map_reloc->where = (grub_uint64_t *) ((char *) map->where +
+ page2offset (map->area.n_pt_pages));
return GRUB_ERR_NONE;
}
@@ -367,9 +483,8 @@ grub_xen_boot (void)
(unsigned long long) xen_state.xen_inf.virt_base,
(unsigned long long) page2offset (nr_pages));
- generate_page_table (xen_state.virt_pgtable, xen_state.pgtbl_start,
- xen_state.pgtbl_end, nr_pages,
- xen_state.xen_inf.virt_base, xen_state.virt_mfn_list);
+ xen_state.map_reloc->area.pfn_start = nr_pages;
+ generate_page_table (xen_state.virt_mfn_list);
xen_state.state.entry_point = xen_state.xen_inf.entry_point;
@@ -20,6 +20,8 @@
#ifndef GRUB_MEMORY_CPU_HEADER
#define GRUB_MEMORY_CPU_HEADER 1
+#define PAGE_SHIFT 12
+
/* The flag for protected mode. */
#define GRUB_MEMORY_CPU_CR0_PE_ON 0x1
#define GRUB_MEMORY_CPU_CR4_PAE_ON 0x00000020
@@ -31,6 +33,11 @@
#define GRUB_MEMORY_MACHINE_UPPER_START 0x100000 /* 1 MiB */
#define GRUB_MEMORY_MACHINE_LOWER_SIZE GRUB_MEMORY_MACHINE_UPPER_START
+/* Some PTE definitions. */
+#define GRUB_PAGE_PRESENT 0x00000001
+#define GRUB_PAGE_RW 0x00000002
+#define GRUB_PAGE_USER 0x00000004
+
#ifndef ASM_FILE
#define GRUB_MMAP_MALLOC_LOW 1
@@ -23,11 +23,13 @@
#include <grub/err.h>
#include <grub/relocator.h>
+#define XEN_MAX_MAPPINGS 3
+
struct grub_relocator_xen_state
{
grub_addr_t start_info;
- grub_addr_t paging_start;
- grub_addr_t paging_size;
+ grub_addr_t paging_start[XEN_MAX_MAPPINGS];
+ grub_addr_t paging_size[XEN_MAX_MAPPINGS];
grub_addr_t mfn_list;
grub_addr_t stack;
grub_addr_t entry_point;
Modify the page table construction to allow multiple virtual regions
to be mapped. This is done as preparation for removing the p2m list
from the initial kernel mapping in order to support huge pv domains.

This allows a cleaner approach for mapping the relocator page by
using this capability.

The interface to the assembler level of the relocator has to be
changed in order to be able to process multiple page table areas.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V3: use constants instead of numbers as requested by Daniel Kiper
    add lots of comments to assembly code as requested by Daniel Kiper
---
 grub-core/lib/i386/xen/relocator.S   |  87 ++++++----
 grub-core/lib/x86_64/xen/relocator.S | 134 ++++++---------
 grub-core/lib/xen/relocator.c        |  25 ++-
 grub-core/loader/i386/xen.c          | 325 ++++++++++++++++++++++++-----------
 include/grub/i386/memory.h           |   7 +
 include/grub/xen/relocator.h         |   6 +-
 6 files changed, 354 insertions(+), 230 deletions(-)
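
[Illustrative note for reviewers, not part of the patch.]

The new C-to-assembler interface boils down to an array of
(start pfn, number of page-table pages) pairs terminated by a size of 0,
i.e. grub_relocator_xen_paging_areas above. The stand-alone sketch below
mimics how the relocator assembly walks that table; dump_paging_areas()
and the sample pfn values are made up for illustration only.

#include <stdio.h>
#include <stdint.h>

#define XEN_MAX_MAPPINGS 3

/* Mirrors the (start, size) pair layout handed to the relocator asm;
   a size of 0 marks the end of the list. */
struct paging_area
{
  uintptr_t start;   /* first pfn of the page-table pages of this area */
  uintptr_t size;    /* number of page-table pages, 0 = end marker */
};

static void
dump_paging_areas (const struct paging_area *areas)
{
  int i;

  for (i = 0; i < XEN_MAX_MAPPINGS; i++)
    {
      if (areas[i].size == 0)
        break;  /* same stop condition as the "jz 3f" in the asm loop */
      printf ("area %d: page-table pfns %#lx-%#lx remapped read-only\n", i,
              (unsigned long) areas[i].start,
              (unsigned long) (areas[i].start + areas[i].size - 1));
    }
}

int
main (void)
{
  /* Hypothetical values: main kernel mapping plus the relocator page. */
  struct paging_area areas[XEN_MAX_MAPPINGS] = {
    { 0x1200, 12 },     /* page tables of the kernel area */
    { 0x120c, 3 },      /* page tables mapping the relocator page */
    { 0, 0 },           /* end marker */
  };

  dump_paging_areas (areas);
  return 0;
}

The zero-size terminator is what lets the remapper loop at label "1:"
stop after the last area instead of relying on a fixed count.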