[XEN,3/7] x86: add deviation comments for asm-only functions

Message ID cf9012d5e4fbe514e9cce3f0074b1a87df505bc0.1696232393.git.nicola.vetrini@bugseng.com
State Superseded
Series Fix or deviate various instances of missing declarations

Commit Message

Nicola Vetrini Oct. 2, 2023, 7:49 a.m. UTC
As stated in rules.rst, functions used only in asm code
are allowed to have no prior declaration visible when being
defined, hence these functions are deviated.
This also fixes violations of MISRA C:2012 Rule 8.4.

Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
---
 xen/arch/x86/hvm/svm/intr.c      | 1 +
 xen/arch/x86/hvm/svm/nestedsvm.c | 1 +
 xen/arch/x86/hvm/svm/svm.c       | 2 ++
 xen/arch/x86/traps.c             | 1 +
 xen/arch/x86/x86_64/traps.c      | 1 +
 5 files changed, 6 insertions(+)
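
For context, the pattern being deviated looks roughly like the sketch below (file and
function names are hypothetical, not taken from the patch): the function is defined in C
but never called from C, so no header declares it; its only caller is an assembly stub
that references the symbol directly. The /* SAF-1-safe */ comment on the line immediately
before the definition tells the static analyser that this instance of MISRA C:2012
Rule 8.4 is covered by the deviation documented in rules.rst.

/*
 * entry.S (hypothetical): the only caller references the symbol from
 * assembly, so a declaration in a C header would have no C consumer.
 *
 *     call  handle_exit_from_asm
 */

/* handlers.c (hypothetical): definition with no prior declaration visible. */
/* SAF-1-safe */
void handle_exit_from_asm(void)
{
    /* work done on the path reached only via the assembly stub */
}

An alternative would be to declare such functions in a header included by the defining
file, but this series takes the deviation route that rules.rst allows for asm-only
functions.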

Comments

Stefano Stabellini Oct. 2, 2023, 10:40 p.m. UTC | #1
On Mon, 2 Oct 2023, Nicola Vetrini wrote:
> As stated in rules.rst, functions used only in asm code
> are allowed to have no prior declaration visible when being
> defined, hence these functions are deviated.
> This also fixes violations of MISRA C:2012 Rule 8.4.
> 
> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

Patch

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 192e17ebbfbb..bd9dc560bbc6 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -123,6 +123,7 @@  static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
         vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
 }
 
+/* SAF-1-safe */
 void svm_intr_assist(void)
 {
     struct vcpu *v = current;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index a09b6abaaeaf..c80d59e0728e 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1441,6 +1441,7 @@  nestedsvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
 }
 
 /* VCPU switch */
+/* SAF-1-safe */
 void nsvm_vcpu_switch(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index beb076ea8d62..b9fabd45a119 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1044,6 +1044,7 @@  static void noreturn cf_check svm_do_resume(void)
     reset_stack_and_jump(svm_asm_do_resume);
 }
 
+/* SAF-1-safe */
 void svm_vmenter_helper(void)
 {
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -2574,6 +2575,7 @@  const struct hvm_function_table * __init start_svm(void)
     return &svm_function_table;
 }
 
+/* SAF-1-safe */
 void svm_vmexit_handler(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 0a005f088bca..f27ddb728b2c 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2260,6 +2260,7 @@  void asm_domain_crash_synchronous(unsigned long addr)
 }
 
 #ifdef CONFIG_DEBUG
+/* SAF-1-safe */
 void check_ist_exit(const struct cpu_user_regs *regs, bool ist_exit)
 {
     const unsigned int ist_mask =
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index f4d17b483032..bcb7559b21c3 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -266,6 +266,7 @@  void show_page_walk(unsigned long addr)
            l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
 }
 
+/* SAF-1-safe */
 void do_double_fault(struct cpu_user_regs *regs)
 {
     unsigned int cpu;