
[v3,04/11] KVM: selftests: Add memslot parameter to elf_load

Message ID 20220330174621.1567317-5-bgardon@google.com
State New, archived
Series KVM: x86: Add a cap to disable NX hugepages on a VM

Commit Message

Ben Gardon March 30, 2022, 5:46 p.m. UTC
Currently elf_load loads code into memslot 0. Add a parameter to allow
loading code into any memslot. This will be useful for backing code
pages with huge pages in future commits.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
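Note for context (not for the commit message): a later patch in this
series is expected to use the new parameter roughly as sketched below.
The HPAGE_* constants are placeholders for whatever the test ends up
defining; the snippet is only meant to illustrate the intended usage.

	/* Back a dedicated memslot with hugetlb pages... */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
				    HPAGE_GPA, HPAGE_SLOT,
				    HPAGE_SLOT_NPAGES, 0);

	/* ...and load this test binary's code into that memslot. */
	kvm_vm_elf_load_memslot(vm, program_invocation_name, HPAGE_SLOT);
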
 .../testing/selftests/kvm/include/kvm_util_base.h  |  5 +++++
 tools/testing/selftests/kvm/lib/elf.c              | 13 +++++++++++--
 tools/testing/selftests/kvm/lib/kvm_util.c         | 14 ++++++++++----
 3 files changed, 26 insertions(+), 6 deletions(-)

Comments

David Matlack April 5, 2022, 10:27 p.m. UTC | #1
On Wed, Mar 30, 2022 at 10:46:14AM -0700, Ben Gardon wrote:
> Currently elf_load loads code into memslot 0. Add a parameter to allow
> loading code into any memslot. This will be useful for backing code
> pages with huge pages in future commits.
> 
> No functional change intended.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  .../testing/selftests/kvm/include/kvm_util_base.h  |  5 +++++
>  tools/testing/selftests/kvm/lib/elf.c              | 13 +++++++++++--
>  tools/testing/selftests/kvm/lib/kvm_util.c         | 14 ++++++++++----
>  3 files changed, 26 insertions(+), 6 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index 78c4407f36b4..72163ba2f878 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -122,7 +122,10 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
>  int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
>  		       size_t len);
>  
> +void kvm_vm_elf_load_memslot(struct kvm_vm *vm, const char *filename,
> +			     uint32_t memslot);
>  void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
> +
>  int kvm_memfd_alloc(size_t size, bool hugepages);
>  
>  void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
> @@ -169,6 +172,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
>  void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
>  void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
>  void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
> +vm_vaddr_t vm_vaddr_alloc_memslot(struct kvm_vm *vm, size_t sz,
> +				  vm_vaddr_t vaddr_min, uint32_t memslot);
>  vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
>  vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
>  vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
> diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
> index 13e8e3dcf984..899418e65f60 100644
> --- a/tools/testing/selftests/kvm/lib/elf.c
> +++ b/tools/testing/selftests/kvm/lib/elf.c
> @@ -97,6 +97,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
>   *
>   * Input Args:
>   *   filename - Path to ELF file
> + *   memslot - the memslot into which the ELF should be loaded
>   *
>   * Output Args: None
>   *
> @@ -111,7 +112,8 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
>   * by the image and it needs to have sufficient available physical pages, to
>   * back the virtual pages used to load the image.
>   */
> -void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
> +void kvm_vm_elf_load_memslot(struct kvm_vm *vm, const char *filename,
> +			     uint32_t memslot)

Feedback I've gotten in the past for kernel code and selftests is to
just use double-underscores (i.e. __kvm_vm_elf_load()) for situations
like this, rather than trying to encode the extra parameters in the
function name.
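
E.g. (untested, just to illustrate the naming; the wrapper split stays
exactly as in this patch, only the names change):

	void __kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
			       uint32_t memslot);
	void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);

with the wrapper in lib/elf.c becoming:

	void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
	{
		__kvm_vm_elf_load(vm, filename, 0);
	}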

>  {
>  	off_t offset, offset_rv;
>  	Elf64_Ehdr hdr;
> @@ -162,7 +164,9 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
>  		seg_vend |= vm->page_size - 1;
>  		size_t seg_size = seg_vend - seg_vstart + 1;
>  
> -		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
> +		vm_vaddr_t vaddr = vm_vaddr_alloc_memslot(vm, seg_size,
> +							  seg_vstart,
> +							  memslot);
>  		TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
>  			"virtual memory for segment at requested min addr,\n"
>  			"  segment idx: %u\n"
> @@ -191,3 +195,8 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
>  		}
>  	}
>  }
> +
> +void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
> +{
> +	kvm_vm_elf_load_memslot(vm, filename, 0);
> +}
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 9c4574381daa..09742a787546 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -1336,8 +1336,7 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
>   *   vm - Virtual Machine
>   *   sz - Size in bytes
>   *   vaddr_min - Minimum starting virtual address
> - *   data_memslot - Memory region slot for data pages
> - *   pgd_memslot - Memory region slot for new virtual translation tables
> + *   memslot - Memory region slot for data pages
>   *
>   * Output Args: None
>   *
> @@ -1350,13 +1349,15 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
>   * a unique set of pages, with the minimum real allocation being at least
>   * a page.
>   */
> -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
> +vm_vaddr_t vm_vaddr_alloc_memslot(struct kvm_vm *vm, size_t sz,
> +				  vm_vaddr_t vaddr_min, uint32_t memslot)

Same feedback here; use __vm_vaddr_alloc().
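
Roughly (again untested, just to sketch what I mean):

	vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
				    vm_vaddr_t vaddr_min, uint32_t memslot);

	vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
	{
		return __vm_vaddr_alloc(vm, sz, vaddr_min, 0);
	}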

>  {
>  	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
>  
>  	virt_pgd_alloc(vm);
>  	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
> -					      KVM_UTIL_MIN_PFN * vm->page_size, 0);
> +					      KVM_UTIL_MIN_PFN * vm->page_size,
> +					      memslot);
>  
>  	/*
>  	 * Find an unused range of virtual page addresses of at least
> @@ -1377,6 +1378,11 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
>  	return vaddr_start;
>  }
>  
> +vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
> +{
> +	return vm_vaddr_alloc_memslot(vm, sz, vaddr_min, 0);
> +}
> +
>  /*
>   * VM Virtual Address Allocate Pages
>   *
> -- 
> 2.35.1.1021.g381101b075-goog
>

Patch

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 78c4407f36b4..72163ba2f878 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -122,7 +122,10 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
 		       size_t len);
 
+void kvm_vm_elf_load_memslot(struct kvm_vm *vm, const char *filename,
+			     uint32_t memslot);
 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
+
 int kvm_memfd_alloc(size_t size, bool hugepages);
 
 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
@@ -169,6 +172,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
+vm_vaddr_t vm_vaddr_alloc_memslot(struct kvm_vm *vm, size_t sz,
+				  vm_vaddr_t vaddr_min, uint32_t memslot);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 13e8e3dcf984..899418e65f60 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -97,6 +97,7 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
  *
  * Input Args:
  *   filename - Path to ELF file
+ *   memslot - the memslot into which the ELF should be loaded
  *
  * Output Args: None
  *
@@ -111,7 +112,8 @@ static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
  * by the image and it needs to have sufficient available physical pages, to
  * back the virtual pages used to load the image.
  */
-void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
+void kvm_vm_elf_load_memslot(struct kvm_vm *vm, const char *filename,
+			     uint32_t memslot)
 {
 	off_t offset, offset_rv;
 	Elf64_Ehdr hdr;
@@ -162,7 +164,9 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 		seg_vend |= vm->page_size - 1;
 		size_t seg_size = seg_vend - seg_vstart + 1;
 
-		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
+		vm_vaddr_t vaddr = vm_vaddr_alloc_memslot(vm, seg_size,
+							  seg_vstart,
+							  memslot);
 		TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
 			"virtual memory for segment at requested min addr,\n"
 			"  segment idx: %u\n"
@@ -191,3 +195,8 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 		}
 	}
 }
+
+void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
+{
+	kvm_vm_elf_load_memslot(vm, filename, 0);
+}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9c4574381daa..09742a787546 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1336,8 +1336,7 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
  *   vm - Virtual Machine
  *   sz - Size in bytes
  *   vaddr_min - Minimum starting virtual address
- *   data_memslot - Memory region slot for data pages
- *   pgd_memslot - Memory region slot for new virtual translation tables
+ *   memslot - Memory region slot for data pages
  *
  * Output Args: None
  *
@@ -1350,13 +1349,15 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
  * a unique set of pages, with the minimum real allocation being at least
  * a page.
  */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+vm_vaddr_t vm_vaddr_alloc_memslot(struct kvm_vm *vm, size_t sz,
+				  vm_vaddr_t vaddr_min, uint32_t memslot)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
 	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size, 0);
+					      KVM_UTIL_MIN_PFN * vm->page_size,
+					      memslot);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1377,6 +1378,11 @@  vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 	return vaddr_start;
 }
 
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+{
+	return vm_vaddr_alloc_memslot(vm, sz, vaddr_min, 0);
+}
+
 /*
  * VM Virtual Address Allocate Pages
  *