[v3,11/11] selftests: KVM: Test disabling NX hugepages on a VM

Message ID 20220330174621.1567317-12-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86: Add a cap to disable NX hugepages on a VM

Commit Message

Ben Gardon March 30, 2022, 5:46 p.m. UTC
Add an argument to the NX huge pages test to test disabling the feature
on a VM using the new capability.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 .../selftests/kvm/include/kvm_util_base.h     |  2 +
 tools/testing/selftests/kvm/lib/kvm_util.c    |  7 ++
 .../selftests/kvm/x86_64/nx_huge_pages_test.c | 67 ++++++++++++++++---
 .../kvm/x86_64/nx_huge_pages_test.sh          |  2 +-
 4 files changed, 66 insertions(+), 12 deletions(-)

Comments

David Matlack April 5, 2022, 10:55 p.m. UTC | #1
On Wed, Mar 30, 2022 at 10:46:21AM -0700, Ben Gardon wrote:
> Add an argument to the NX huge pages test to test disabling the feature
> on a VM using the new capability.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  .../selftests/kvm/include/kvm_util_base.h     |  2 +
>  tools/testing/selftests/kvm/lib/kvm_util.c    |  7 ++
>  .../selftests/kvm/x86_64/nx_huge_pages_test.c | 67 ++++++++++++++++---
>  .../kvm/x86_64/nx_huge_pages_test.sh          |  2 +-
>  4 files changed, 66 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index 72163ba2f878..4db8251c3ce5 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -411,4 +411,6 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name);
>  
>  uint32_t guest_get_vcpuid(void);
>  
> +void vm_disable_nx_huge_pages(struct kvm_vm *vm);
> +
>  #endif /* SELFTEST_KVM_UTIL_BASE_H */
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 9d72d1bb34fa..46a7fa08d3e0 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -2765,3 +2765,10 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name)
>  	return value;
>  }
>  
> +void vm_disable_nx_huge_pages(struct kvm_vm *vm)
> +{
> +	struct kvm_enable_cap cap = { 0 };
> +
> +	cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
> +	vm_enable_cap(vm, &cap);
> +}
> diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> index 2bcbe4efdc6a..a0c79f6ddc08 100644
> --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> @@ -13,6 +13,8 @@
>  #include <fcntl.h>
>  #include <stdint.h>
>  #include <time.h>
> +#include <linux/reboot.h>
> +#include <sys/syscall.h>
>  
>  #include <test_util.h>
>  #include "kvm_util.h"
> @@ -57,13 +59,56 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
>  		    expected_splits, actual_splits);
>  }
>  
> +static void help(void)
> +{
> +	puts("");
> +	printf("usage: nx_huge_pages_test.sh [-x]\n");
> +	puts("");
> +	printf(" -x: Allow executable huge pages on the VM.\n");

Making it a flag means we won't exercise it by default. Is there a
reason to avoid exercising KVM_CAP_VM_DISABLE_NX_HUGE_PAGES by default?

Assuming no, I would recommend factoring out the test to a helper
function that takes a parameter that tells it if nx_huge_pages is
enabled or disabled. Then run this helper function multiple times. E.g.
once with nx_huge_pages enabled, once with nx_huge_pages disabled via
KVM_CAP_VM_DISABLE_NX_HUGE_PAGES. This would also then let you test that
disabling via the module param works.

By the way, that brings up another issue. What if NX HugePages is not
enabled on this host? e.g. we're running on AMD, or we're running on a
non-affected Intel host, or we're running on a machine where nx huge
pages has been disabled by the admin? The test should probably return
KSFT_SKIP in those cases.
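
A rough sketch of that structure (run_test() and nx_huge_pages_active()
below are illustrative only, not part of this patch; the expected page
counts and memslot setup are elided, and <stdio.h> is assumed on top of
the test's existing includes):

static bool nx_huge_pages_active(void)
{
	/*
	 * The wrapper script drives /sys/module/kvm/parameters/nx_huge_pages,
	 * so reading it back tells us whether the mitigation is in effect.
	 */
	FILE *f = fopen("/sys/module/kvm/parameters/nx_huge_pages", "r");
	int c = f ? fgetc(f) : 'N';

	if (f)
		fclose(f);
	return c == 'Y';
}

static void run_test(bool disable_nx)
{
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
	if (disable_nx)
		vm_disable_nx_huge_pages(vm);

	/*
	 * ... existing memslot setup, run_guest_code() calls, and
	 * check_2m_page_count()/check_split_count() checks, with the
	 * expected values chosen based on disable_nx ...
	 */

	kvm_vm_free(vm);
}

int main(int argc, char **argv)
{
	if (!nx_huge_pages_active())
		return KSFT_SKIP;

	run_test(false);	/* nx_huge_pages enabled via module param */
	run_test(true);		/* disabled via KVM_CAP_VM_DISABLE_NX_HUGE_PAGES */
	return 0;
}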

> +	puts("");
> +	exit(0);
> +}
> +
>  int main(int argc, char **argv)
>  {
>  	struct kvm_vm *vm;
>  	struct timespec ts;
> +	bool disable_nx = false;
> +	int opt;
> +	int r;
> +
> +	while ((opt = getopt(argc, argv, "x")) != -1) {
> +		switch (opt) {
> +		case 'x':
> +			disable_nx = true;
> +			break;
> +		case 'h':
> +		default:
> +			help();
> +			break;
> +		}
> +	}
>  
>  	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
>  
> +	if (disable_nx) {
> +		/*
> +		 * Check if this process has the reboot permissions needed to
> +		 * disable NX huge pages on a VM.
> +		 *
> +		 * The reboot call below will never have any effect because
> +		 * the magic values are not set correctly, however the
> +		 * permission check is done before the magic value check.
> +		 */
> +		r = syscall(SYS_reboot, 0, 0, 0, NULL);
> +		if (r == -EPERM)
> +			return KSFT_SKIP;
> +		TEST_ASSERT(r == -EINVAL,
> +			    "Reboot syscall should fail with -EINVAL");

Just check if KVM_CAP_VM_DISABLE_NX_HUGE_PAGES returns -EPERM?
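
For reference, a minimal sketch of that alternative, assuming a
non-asserting variant of vm_enable_cap() (called __vm_enable_cap() here,
which does not exist in the tree at this point) that returns the raw
KVM_ENABLE_CAP ioctl result:

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES,
	};
	int r = __vm_enable_cap(vm, &cap);	/* hypothetical non-asserting helper */

	if (r < 0 && errno == EPERM)
		return KSFT_SKIP;	/* no CAP_SYS_BOOT, can't disable NX huge pages */
	TEST_ASSERT(!r, "KVM_CAP_VM_DISABLE_NX_HUGE_PAGES failed, errno %d", errno);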

> +
> +		vm_disable_nx_huge_pages(vm);
> +	}
> +
>  	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
>  				    HPAGE_PADDR_START, HPAGE_SLOT,
>  				    HPAGE_SLOT_NPAGES, 0);
> @@ -83,21 +128,21 @@ int main(int argc, char **argv)
>  	 * at 2M.
>  	 */
>  	run_guest_code(vm, guest_code0);
> -	check_2m_page_count(vm, 2);
> -	check_split_count(vm, 2);
> +	check_2m_page_count(vm, disable_nx ? 4 : 2);
> +	check_split_count(vm, disable_nx ? 0 : 2);
>  
>  	/*
>  	 * guest_code1 is in the same huge page as data1, so it will cause
>  	 * that huge page to be remapped at 4k.
>  	 */
>  	run_guest_code(vm, guest_code1);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 3);
> +	check_2m_page_count(vm, disable_nx ? 4 : 1);
> +	check_split_count(vm, disable_nx ? 0 : 3);
>  
>  	/* Run guest_code0 again to check that it has no effect. */
>  	run_guest_code(vm, guest_code0);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 3);
> +	check_2m_page_count(vm, disable_nx ? 4 : 1);
> +	check_split_count(vm, disable_nx ? 0 : 3);
>  
>  	/*
>  	 * Give recovery thread time to run. The wrapper script sets
> @@ -110,7 +155,7 @@ int main(int argc, char **argv)
>  	/*
>  	 * Now that the reclaimer has run, all the split pages should be gone.
>  	 */
> -	check_2m_page_count(vm, 1);
> +	check_2m_page_count(vm, disable_nx ? 4 : 1);
>  	check_split_count(vm, 0);
>  
>  	/*
> @@ -118,13 +163,13 @@ int main(int argc, char **argv)
>  	 * again to check that pages are mapped at 2M again.
>  	 */
>  	run_guest_code(vm, guest_code0);
> -	check_2m_page_count(vm, 2);
> -	check_split_count(vm, 2);
> +	check_2m_page_count(vm, disable_nx ? 4 : 2);
> +	check_split_count(vm, disable_nx ? 0 : 2);
>  
>  	/* Pages are once again split from running guest_code1. */
>  	run_guest_code(vm, guest_code1);
> -	check_2m_page_count(vm, 1);
> -	check_split_count(vm, 3);
> +	check_2m_page_count(vm, disable_nx ? 4 : 1);
> +	check_split_count(vm, disable_nx ? 0 : 3);
>  
>  	kvm_vm_free(vm);
>  
> diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> index 19fc95723fcb..29f999f48848 100755
> --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> @@ -14,7 +14,7 @@ echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
>  echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
>  echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
>  
> -./nx_huge_pages_test
> +./nx_huge_pages_test "${@}"
>  RET=$?
>  
>  echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages
> -- 
> 2.35.1.1021.g381101b075-goog
>
Ben Gardon April 7, 2022, 6:26 p.m. UTC | #2
On Tue, Apr 5, 2022 at 3:55 PM David Matlack <dmatlack@google.com> wrote:
>
> On Wed, Mar 30, 2022 at 10:46:21AM -0700, Ben Gardon wrote:
> > Add an argument to the NX huge pages test to test disabling the feature
> > on a VM using the new capability.
> >
> > Signed-off-by: Ben Gardon <bgardon@google.com>
> > ---
> >  .../selftests/kvm/include/kvm_util_base.h     |  2 +
> >  tools/testing/selftests/kvm/lib/kvm_util.c    |  7 ++
> >  .../selftests/kvm/x86_64/nx_huge_pages_test.c | 67 ++++++++++++++++---
> >  .../kvm/x86_64/nx_huge_pages_test.sh          |  2 +-
> >  4 files changed, 66 insertions(+), 12 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > index 72163ba2f878..4db8251c3ce5 100644
> > --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > @@ -411,4 +411,6 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name);
> >
> >  uint32_t guest_get_vcpuid(void);
> >
> > +void vm_disable_nx_huge_pages(struct kvm_vm *vm);
> > +
> >  #endif /* SELFTEST_KVM_UTIL_BASE_H */
> > diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> > index 9d72d1bb34fa..46a7fa08d3e0 100644
> > --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> > +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> > @@ -2765,3 +2765,10 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name)
> >       return value;
> >  }
> >
> > +void vm_disable_nx_huge_pages(struct kvm_vm *vm)
> > +{
> > +     struct kvm_enable_cap cap = { 0 };
> > +
> > +     cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
> > +     vm_enable_cap(vm, &cap);
> > +}
> > diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > index 2bcbe4efdc6a..a0c79f6ddc08 100644
> > --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > @@ -13,6 +13,8 @@
> >  #include <fcntl.h>
> >  #include <stdint.h>
> >  #include <time.h>
> > +#include <linux/reboot.h>
> > +#include <sys/syscall.h>
> >
> >  #include <test_util.h>
> >  #include "kvm_util.h"
> > @@ -57,13 +59,56 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
> >                   expected_splits, actual_splits);
> >  }
> >
> > +static void help(void)
> > +{
> > +     puts("");
> > +     printf("usage: nx_huge_pages_test.sh [-x]\n");
> > +     puts("");
> > +     printf(" -x: Allow executable huge pages on the VM.\n");
>
> Making it a flag means we won't exercise it by default. Is there a
> reason to avoid exercising KVM_CAP_VM_DISABLE_NX_HUGE_PAGES by default?
>
> Assuming no, I would recommend factoring out the test to a helper
> function that takes a parameter that tells it if nx_huge_pages is
> enabled or disabled. Then run this helper function multiple times. E.g.
> once with nx_huge_pages enabled, once with nx_huge_pages disabled via
> KVM_CAP_VM_DISABLE_NX_HUGE_PAGES. This would also then let you test that
> disabling via the module param works.
>
> By the way, that brings up another issue. What if NX HugePages is not
> enabled on this host? e.g. we're running on AMD, or we're running on a
> non-affected Intel host, or we're running on a machine where nx huge
> pages has been disabled by the admin? The test should probably return
> KSFT_SKIP in those cases.

That's all a good idea. Will do.

>
> > +     puts("");
> > +     exit(0);
> > +}
> > +
> >  int main(int argc, char **argv)
> >  {
> >       struct kvm_vm *vm;
> >       struct timespec ts;
> > +     bool disable_nx = false;
> > +     int opt;
> > +     int r;
> > +
> > +     while ((opt = getopt(argc, argv, "x")) != -1) {
> > +             switch (opt) {
> > +             case 'x':
> > +                     disable_nx = true;
> > +                     break;
> > +             case 'h':
> > +             default:
> > +                     help();
> > +                     break;
> > +             }
> > +     }
> >
> >       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
> >
> > +     if (disable_nx) {
> > +             /*
> > +              * Check if this process has the reboot permissions needed to
> > +              * disable NX huge pages on a VM.
> > +              *
> > +              * The reboot call below will never have any effect because
> > +              * the magic values are not set correctly, however the
> > +              * permission check is done before the magic value check.
> > +              */
> > +             r = syscall(SYS_reboot, 0, 0, 0, NULL);
> > +             if (r == -EPERM)
> > +                     return KSFT_SKIP;
> > +             TEST_ASSERT(r == -EINVAL,
> > +                         "Reboot syscall should fail with -EINVAL");
>
> Just check if KVM_CAP_VM_DISABLE_NX_HUGE_PAGES returns -EPERM?
>
> > +
> > +             vm_disable_nx_huge_pages(vm);
> > +     }
> > +
> >       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
> >                                   HPAGE_PADDR_START, HPAGE_SLOT,
> >                                   HPAGE_SLOT_NPAGES, 0);
> > @@ -83,21 +128,21 @@ int main(int argc, char **argv)
> >        * at 2M.
> >        */
> >       run_guest_code(vm, guest_code0);
> > -     check_2m_page_count(vm, 2);
> > -     check_split_count(vm, 2);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 2);
> > +     check_split_count(vm, disable_nx ? 0 : 2);
> >
> >       /*
> >        * guest_code1 is in the same huge page as data1, so it will cause
> >        * that huge page to be remapped at 4k.
> >        */
> >       run_guest_code(vm, guest_code1);
> > -     check_2m_page_count(vm, 1);
> > -     check_split_count(vm, 3);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > +     check_split_count(vm, disable_nx ? 0 : 3);
> >
> >       /* Run guest_code0 again to check that it has no effect. */
> >       run_guest_code(vm, guest_code0);
> > -     check_2m_page_count(vm, 1);
> > -     check_split_count(vm, 3);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > +     check_split_count(vm, disable_nx ? 0 : 3);
> >
> >       /*
> >        * Give recovery thread time to run. The wrapper script sets
> > @@ -110,7 +155,7 @@ int main(int argc, char **argv)
> >       /*
> >        * Now that the reclaimer has run, all the split pages should be gone.
> >        */
> > -     check_2m_page_count(vm, 1);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> >       check_split_count(vm, 0);
> >
> >       /*
> > @@ -118,13 +163,13 @@ int main(int argc, char **argv)
> >        * again to check that pages are mapped at 2M again.
> >        */
> >       run_guest_code(vm, guest_code0);
> > -     check_2m_page_count(vm, 2);
> > -     check_split_count(vm, 2);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 2);
> > +     check_split_count(vm, disable_nx ? 0 : 2);
> >
> >       /* Pages are once again split from running guest_code1. */
> >       run_guest_code(vm, guest_code1);
> > -     check_2m_page_count(vm, 1);
> > -     check_split_count(vm, 3);
> > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > +     check_split_count(vm, disable_nx ? 0 : 3);
> >
> >       kvm_vm_free(vm);
> >
> > diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > index 19fc95723fcb..29f999f48848 100755
> > --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > @@ -14,7 +14,7 @@ echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> >  echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> >  echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
> >
> > -./nx_huge_pages_test
> > +./nx_huge_pages_test "${@}"
> >  RET=$?
> >
> >  echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages
> > --
> > 2.35.1.1021.g381101b075-goog
> >
Ben Gardon April 7, 2022, 6:39 p.m. UTC | #3
On Thu, Apr 7, 2022 at 11:26 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Tue, Apr 5, 2022 at 3:55 PM David Matlack <dmatlack@google.com> wrote:
> >
> > On Wed, Mar 30, 2022 at 10:46:21AM -0700, Ben Gardon wrote:
> > > Add an argument to the NX huge pages test to test disabling the feature
> > > on a VM using the new capability.
> > >
> > > Signed-off-by: Ben Gardon <bgardon@google.com>
> > > ---
> > >  .../selftests/kvm/include/kvm_util_base.h     |  2 +
> > >  tools/testing/selftests/kvm/lib/kvm_util.c    |  7 ++
> > >  .../selftests/kvm/x86_64/nx_huge_pages_test.c | 67 ++++++++++++++++---
> > >  .../kvm/x86_64/nx_huge_pages_test.sh          |  2 +-
> > >  4 files changed, 66 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > index 72163ba2f878..4db8251c3ce5 100644
> > > --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > @@ -411,4 +411,6 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name);
> > >
> > >  uint32_t guest_get_vcpuid(void);
> > >
> > > +void vm_disable_nx_huge_pages(struct kvm_vm *vm);
> > > +
> > >  #endif /* SELFTEST_KVM_UTIL_BASE_H */
> > > diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> > > index 9d72d1bb34fa..46a7fa08d3e0 100644
> > > --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> > > +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> > > @@ -2765,3 +2765,10 @@ uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name)
> > >       return value;
> > >  }
> > >
> > > +void vm_disable_nx_huge_pages(struct kvm_vm *vm)
> > > +{
> > > +     struct kvm_enable_cap cap = { 0 };
> > > +
> > > +     cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
> > > +     vm_enable_cap(vm, &cap);
> > > +}
> > > diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > > index 2bcbe4efdc6a..a0c79f6ddc08 100644
> > > --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > > +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
> > > @@ -13,6 +13,8 @@
> > >  #include <fcntl.h>
> > >  #include <stdint.h>
> > >  #include <time.h>
> > > +#include <linux/reboot.h>
> > > +#include <sys/syscall.h>
> > >
> > >  #include <test_util.h>
> > >  #include "kvm_util.h"
> > > @@ -57,13 +59,56 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
> > >                   expected_splits, actual_splits);
> > >  }
> > >
> > > +static void help(void)
> > > +{
> > > +     puts("");
> > > +     printf("usage: nx_huge_pages_test.sh [-x]\n");
> > > +     puts("");
> > > +     printf(" -x: Allow executable huge pages on the VM.\n");
> >
> > Making it a flag means we won't exercise it by default. Is there a
> > reason to avoid exercising KVM_CAP_VM_DISABLE_NX_HUGE_PAGES by default?
> >
> > Assuming no, I would recommend factoring out the test to a helper
> > function that takes a parameter that tells it if nx_huge_pages is
> > enabled or disabled. Then run this helper function multiple times. E.g.
> > once with nx_huge_pages enabled, once with nx_huge_pages disabled via
> > KVM_CAP_VM_DISABLE_NX_HUGE_PAGES. This would also then let you test that
> > disabling via the module param works.
> >
> > By the way, that brings up another issue. What if NX HugePages is not
> > enabled on this host? e.g. we're running on AMD, or we're running on a
> > non-affected Intel host, or we're running on a machine where nx huge
> > pages has been disabled by the admin? The test should probably return
> > KSFT_SKIP in those cases.

The wrapper script just always turns nx_huge_pages on, which I think
is a better solution, but perhaps it should check for permission
errors when doing that.

>
> That's all a good idea. Will do.
>
> >
> > > +     puts("");
> > > +     exit(0);
> > > +}
> > > +
> > >  int main(int argc, char **argv)
> > >  {
> > >       struct kvm_vm *vm;
> > >       struct timespec ts;
> > > +     bool disable_nx = false;
> > > +     int opt;
> > > +     int r;
> > > +
> > > +     while ((opt = getopt(argc, argv, "x")) != -1) {
> > > +             switch (opt) {
> > > +             case 'x':
> > > +                     disable_nx = true;
> > > +                     break;
> > > +             case 'h':
> > > +             default:
> > > +                     help();
> > > +                     break;
> > > +             }
> > > +     }
> > >
> > >       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
> > >
> > > +     if (disable_nx) {
> > > +             /*
> > > +              * Check if this process has the reboot permissions needed to
> > > +              * disable NX huge pages on a VM.
> > > +              *
> > > +              * The reboot call below will never have any effect because
> > > +              * the magic values are not set correctly, however the
> > > +              * permission check is done before the magic value check.
> > > +              */
> > > +             r = syscall(SYS_reboot, 0, 0, 0, NULL);
> > > +             if (r == -EPERM)
> > > +                     return KSFT_SKIP;
> > > +             TEST_ASSERT(r == -EINVAL,
> > > +                         "Reboot syscall should fail with -EINVAL");
> >
> > Just check if KVM_CAP_VM_DISABLE_NX_HUGE_PAGES returns -EPERM?

We could do that but then we wouldn't be checking that the permission
checks work as expected.

> >
> > > +
> > > +             vm_disable_nx_huge_pages(vm);
> > > +     }
> > > +
> > >       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
> > >                                   HPAGE_PADDR_START, HPAGE_SLOT,
> > >                                   HPAGE_SLOT_NPAGES, 0);
> > > @@ -83,21 +128,21 @@ int main(int argc, char **argv)
> > >        * at 2M.
> > >        */
> > >       run_guest_code(vm, guest_code0);
> > > -     check_2m_page_count(vm, 2);
> > > -     check_split_count(vm, 2);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 2);
> > > +     check_split_count(vm, disable_nx ? 0 : 2);
> > >
> > >       /*
> > >        * guest_code1 is in the same huge page as data1, so it will cause
> > >        * that huge page to be remapped at 4k.
> > >        */
> > >       run_guest_code(vm, guest_code1);
> > > -     check_2m_page_count(vm, 1);
> > > -     check_split_count(vm, 3);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > > +     check_split_count(vm, disable_nx ? 0 : 3);
> > >
> > >       /* Run guest_code0 again to check that it has no effect. */
> > >       run_guest_code(vm, guest_code0);
> > > -     check_2m_page_count(vm, 1);
> > > -     check_split_count(vm, 3);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > > +     check_split_count(vm, disable_nx ? 0 : 3);
> > >
> > >       /*
> > >        * Give recovery thread time to run. The wrapper script sets
> > > @@ -110,7 +155,7 @@ int main(int argc, char **argv)
> > >       /*
> > >        * Now that the reclaimer has run, all the split pages should be gone.
> > >        */
> > > -     check_2m_page_count(vm, 1);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > >       check_split_count(vm, 0);
> > >
> > >       /*
> > > @@ -118,13 +163,13 @@ int main(int argc, char **argv)
> > >        * again to check that pages are mapped at 2M again.
> > >        */
> > >       run_guest_code(vm, guest_code0);
> > > -     check_2m_page_count(vm, 2);
> > > -     check_split_count(vm, 2);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 2);
> > > +     check_split_count(vm, disable_nx ? 0 : 2);
> > >
> > >       /* Pages are once again split from running guest_code1. */
> > >       run_guest_code(vm, guest_code1);
> > > -     check_2m_page_count(vm, 1);
> > > -     check_split_count(vm, 3);
> > > +     check_2m_page_count(vm, disable_nx ? 4 : 1);
> > > +     check_split_count(vm, disable_nx ? 0 : 3);
> > >
> > >       kvm_vm_free(vm);
> > >
> > > diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > > index 19fc95723fcb..29f999f48848 100755
> > > --- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > > +++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
> > > @@ -14,7 +14,7 @@ echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
> > >  echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
> > >  echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
> > >
> > > -./nx_huge_pages_test
> > > +./nx_huge_pages_test "${@}"
> > >  RET=$?
> > >
> > >  echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages
> > > --
> > > 2.35.1.1021.g381101b075-goog
> > >
Patch

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 72163ba2f878..4db8251c3ce5 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -411,4 +411,6 @@  uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name);
 
 uint32_t guest_get_vcpuid(void);
 
+void vm_disable_nx_huge_pages(struct kvm_vm *vm);
+
 #endif /* SELFTEST_KVM_UTIL_BASE_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9d72d1bb34fa..46a7fa08d3e0 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -2765,3 +2765,10 @@  uint64_t vm_get_single_stat(struct kvm_vm *vm, const char *stat_name)
 	return value;
 }
 
+void vm_disable_nx_huge_pages(struct kvm_vm *vm)
+{
+	struct kvm_enable_cap cap = { 0 };
+
+	cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;
+	vm_enable_cap(vm, &cap);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 2bcbe4efdc6a..a0c79f6ddc08 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -13,6 +13,8 @@ 
 #include <fcntl.h>
 #include <stdint.h>
 #include <time.h>
+#include <linux/reboot.h>
+#include <sys/syscall.h>
 
 #include <test_util.h>
 #include "kvm_util.h"
@@ -57,13 +59,56 @@  static void check_split_count(struct kvm_vm *vm, int expected_splits)
 		    expected_splits, actual_splits);
 }
 
+static void help(void)
+{
+	puts("");
+	printf("usage: nx_huge_pages_test.sh [-x]\n");
+	puts("");
+	printf(" -x: Allow executable huge pages on the VM.\n");
+	puts("");
+	exit(0);
+}
+
 int main(int argc, char **argv)
 {
 	struct kvm_vm *vm;
 	struct timespec ts;
+	bool disable_nx = false;
+	int opt;
+	int r;
+
+	while ((opt = getopt(argc, argv, "x")) != -1) {
+		switch (opt) {
+		case 'x':
+			disable_nx = true;
+			break;
+		case 'h':
+		default:
+			help();
+			break;
+		}
+	}
 
 	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
 
+	if (disable_nx) {
+		/*
+		 * Check if this process has the reboot permissions needed to
+		 * disable NX huge pages on a VM.
+		 *
+		 * The reboot call below will never have any effect because
+		 * the magic values are not set correctly, however the
+		 * permission check is done before the magic value check.
+		 */
+		r = syscall(SYS_reboot, 0, 0, 0, NULL);
+		if (r == -EPERM)
+			return KSFT_SKIP;
+		TEST_ASSERT(r == -EINVAL,
+			    "Reboot syscall should fail with -EINVAL");
+
+		vm_disable_nx_huge_pages(vm);
+	}
+
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
 				    HPAGE_PADDR_START, HPAGE_SLOT,
 				    HPAGE_SLOT_NPAGES, 0);
@@ -83,21 +128,21 @@  int main(int argc, char **argv)
 	 * at 2M.
 	 */
 	run_guest_code(vm, guest_code0);
-	check_2m_page_count(vm, 2);
-	check_split_count(vm, 2);
+	check_2m_page_count(vm, disable_nx ? 4 : 2);
+	check_split_count(vm, disable_nx ? 0 : 2);
 
 	/*
 	 * guest_code1 is in the same huge page as data1, so it will cause
 	 * that huge page to be remapped at 4k.
 	 */
 	run_guest_code(vm, guest_code1);
-	check_2m_page_count(vm, 1);
-	check_split_count(vm, 3);
+	check_2m_page_count(vm, disable_nx ? 4 : 1);
+	check_split_count(vm, disable_nx ? 0 : 3);
 
 	/* Run guest_code0 again to check that it has no effect. */
 	run_guest_code(vm, guest_code0);
-	check_2m_page_count(vm, 1);
-	check_split_count(vm, 3);
+	check_2m_page_count(vm, disable_nx ? 4 : 1);
+	check_split_count(vm, disable_nx ? 0 : 3);
 
 	/*
 	 * Give recovery thread time to run. The wrapper script sets
@@ -110,7 +155,7 @@  int main(int argc, char **argv)
 	/*
 	 * Now that the reclaimer has run, all the split pages should be gone.
 	 */
-	check_2m_page_count(vm, 1);
+	check_2m_page_count(vm, disable_nx ? 4 : 1);
 	check_split_count(vm, 0);
 
 	/*
@@ -118,13 +163,13 @@  int main(int argc, char **argv)
 	 * again to check that pages are mapped at 2M again.
 	 */
 	run_guest_code(vm, guest_code0);
-	check_2m_page_count(vm, 2);
-	check_split_count(vm, 2);
+	check_2m_page_count(vm, disable_nx ? 4 : 2);
+	check_split_count(vm, disable_nx ? 0 : 2);
 
 	/* Pages are once again split from running guest_code1. */
 	run_guest_code(vm, guest_code1);
-	check_2m_page_count(vm, 1);
-	check_split_count(vm, 3);
+	check_2m_page_count(vm, disable_nx ? 4 : 1);
+	check_split_count(vm, disable_nx ? 0 : 3);
 
 	kvm_vm_free(vm);
 
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
index 19fc95723fcb..29f999f48848 100755
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
@@ -14,7 +14,7 @@  echo 1 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
 echo 100 > /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
 echo 200 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 
-./nx_huge_pages_test
+./nx_huge_pages_test "${@}"
 RET=$?
 
 echo $NX_HUGE_PAGES > /sys/module/kvm/parameters/nx_huge_pages