Fix typos that have been discovered with the "codespell" utility.

Signed-off-by: Thomas Huth <thuth@redhat.com>
---
 x86/pmu.c       | 2 +-
 x86/pmu_pebs.c  | 2 +-
 x86/svm_tests.c | 4 ++--
 x86/vmx_tests.c | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/x86/pmu.c b/x86/pmu.c
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -563,7 +563,7 @@ static void check_tsx_cycles(void)
cnt.ctr = MSR_GP_COUNTERx(i);
if (i == 2) {
- /* Transactional cycles commited only on gp counter 2 */
+ /* Transactional cycles committed only on gp counter 2 */
cnt.config = EVNTSEL_OS | EVNTSEL_USR | 0x30000003c;
} else {
/* Transactional cycles */
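A note on the raw encoding touched by this hunk: per the Intel SDM, IA32_PERFEVTSELx carries two TSX bits, and IN_TXCP is only accepted on general-purpose counter 2, which is what the corrected comment refers to. A minimal sketch of how 0x30000003c decomposes (the macro names here are hypothetical; the bit positions are from the SDM):

  /* Hypothetical helper macros; bit positions per Intel SDM, vol. 3. */
  #define EVNTSEL_EVENT(e)	((u64)(e) & 0xff)	/* event select, bits 7:0 */
  #define EVNTSEL_IN_TX	(1ULL << 32)	/* count only inside HLE/RTM regions */
  #define EVNTSEL_IN_TXCP	(1ULL << 33)	/* drop aborted tx cycles; PMC2 only */

  /*
   * 0x30000003c == EVNTSEL_IN_TXCP | EVNTSEL_IN_TX | EVNTSEL_EVENT(0x3c),
   * i.e. unhalted core cycles counted only when transactionally committed.
   */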
diff --git a/x86/pmu_pebs.c b/x86/pmu_pebs.c
--- a/x86/pmu_pebs.c
+++ b/x86/pmu_pebs.c
@@ -308,7 +308,7 @@ static void check_pebs_records(u64 bitmask, u64 pebs_data_cfg)
(pebs_rec->format_size & GENMASK_ULL(47, 0)) == pebs_data_cfg;
expected = pebs_idx_match && pebs_size_match && data_cfg_match;
report(expected,
- "PEBS record (written seq %d) is verified (inclduing size, counters and cfg).", count);
+ "PEBS record (written seq %d) is verified (including size, counters and cfg).", count);
cur_record = cur_record + pebs_record_size;
count++;
} while (expected && (void *)cur_record < (void *)ds->pebs_index);
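For readers of this hunk: the fields being tested come from the "basic info" group at the head of each adaptive PEBS record. A sketch of that layout, assuming the test's pebs_rec follows the SDM definition (the field comments are mine, not from the source):

  /* Sketch of the adaptive PEBS basic group, layout per Intel SDM. */
  struct pebs_basic {
  	u64 format_size;	/* bits 47:0 echo MSR_PEBS_DATA_CFG; 63:48 record size */
  	u64 ip;			/* instruction pointer at the sampled event */
  	u64 applicable_counters;	/* bitmask of counters owning this record */
  	u64 tsc;		/* timestamp counter at record generation */
  };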
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -395,7 +395,7 @@ static bool msr_intercept_finished(struct svm_test *test)
}
/*
- * Warn that #GP exception occured instead.
+ * Warn that #GP exception occurred instead.
* RCX holds the MSR index.
*/
printf("%s 0x%lx #GP exception\n",
@@ -2776,7 +2776,7 @@ static void svm_no_nm_test(void)
vmcb->save.cr0 = vmcb->save.cr0 & ~(X86_CR0_TS | X86_CR0_EM);
report(svm_vmrun() == SVM_EXIT_VMMCALL,
- "fnop with CR0.TS and CR0.EM unset no #NM excpetion");
+ "fnop with CR0.TS and CR0.EM unset no #NM exception");
}
static u64 amd_get_lbr_rip(u32 msr)
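On the #NM hunk above: #NM is raised for x87 instructions only when CR0.TS is set (or CR0.EM, which makes them fault unconditionally), so with both bits clear an fnop must execute cleanly. A hedged sketch of the guest half of such a test (the function name is invented):

  /* Guest sketch: no #NM expected, so fnop runs and we exit via VMMCALL. */
  static void no_nm_guest_sketch(struct svm_test *test)
  {
  	asm volatile("fnop");
  	asm volatile("vmmcall");
  }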
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -7667,7 +7667,7 @@ static void test_host_addr_size(void)
* testcases as needed, but don't guarantee a VM-Exit and so the active
* CR4 and RIP may still hold a test value. Running with the test CR4
* and RIP values at some point is unavoidable, and the active values
- * are unlikely to affect VM-Enter, so the above doen't force a VM-Exit
+	 * are unlikely to affect VM-Enter, so the above doesn't force a VM-Exit
* between testcases. Note, if VM-Enter is surrounded by CALL+RET then
* the active RIP will already be restored, but that's also not
* guaranteed, and CR4 needs to be restored regardless.
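The comment above reasons about test hygiene rather than one code path; the pattern it describes is roughly the following sketch, assuming the usual kvm-unit-tests VMX helpers (this is not the actual function body):

  static void host_state_testcase_sketch(u64 test_cr4, u64 saved_cr4)
  {
  	vmcs_write(HOST_CR4, test_cr4);		/* testcase value */
  	enter_guest();				/* may or may not VM-Exit */
  	/* Restore regardless: the active CR4 may still hold the test value. */
  	vmcs_write(HOST_CR4, saved_cr4);
  	write_cr4(saved_cr4);
  }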
@@ -9382,7 +9382,7 @@ static void vmx_eoi_bitmap_ioapic_scan_test(void)
/*
* Launch L2.
* We expect the exit reason to be VMX_VMCALL (and not EOI INDUCED).
- * In case the reason isn't VMX_VMCALL, the asserion inside
+ * In case the reason isn't VMX_VMCALL, the assertion inside
* skip_exit_vmcall() will fail.
*/
enter_guest();
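For the helper named in the corrected comment: skip_exit_vmcall() asserts the exit reason and advances the guest past its vmcall. A sketch of the semantics the comment relies on (reconstructed from the comment, not copied from the source):

  /* Assert we exited for the guest's vmcall, then step GUEST_RIP past it. */
  static void skip_exit_vmcall_sketch(void)
  {
  	assert_exit_reason(VMX_VMCALL);
  	vmcs_write(GUEST_RIP, vmcs_read(GUEST_RIP) + vmcs_read(EXI_INST_LEN));
  }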
@@ -9698,7 +9698,7 @@ static void vmx_init_signal_test(void)
init_signal_test_exit_reason = -1ull;
vmx_set_test_stage(4);
/*
- * Wait reasonable amont of time for other CPU
+	 * Wait a reasonable amount of time for the other CPU
* to exit to VMX root mode
*/
delay(INIT_SIGNAL_TEST_DELAY);
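On the delay in this hunk: the test cannot synchronously observe the other CPU leaving VMX non-root operation, so it seeds the shared exit-reason with a -1ull sentinel, waits a fixed delay, and then checks whether the value changed. Schematically (the report text here is illustrative, not the test's actual message):

  /* -1ull still stored after the delay means no exit was recorded. */
  init_signal_test_exit_reason = -1ull;
  delay(INIT_SIGNAL_TEST_DELAY);
  report(init_signal_test_exit_reason != -1ull,
         "other CPU exited to VMX root mode in time");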