diff mbox series

KVM: x86/xen: Fix memory leak in kvm_xen_write_hypercall_page()

Message ID 20221216005204.4091927-1-mhal@rbox.co (mailing list archive)
State New, archived
Headers show
Series KVM: x86/xen: Fix memory leak in kvm_xen_write_hypercall_page() | expand

Commit Message

Michal Luczaj Dec. 16, 2022, 12:52 a.m. UTC
Release the page irrespective of the kvm_vcpu_write_guest() return value.

Signed-off-by: Michal Luczaj <mhal@rbox.co>
---
# cat /sys/kernel/debug/kmemleak
unreferenced object 0xffff888131eff000 (size 4096):
  comm "xen_hcall_leak", pid 949, jiffies 4294753212 (age 11.943s)
  hex dump (first 32 bytes):
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<00000000e2915da4>] __kmalloc_node_track_caller+0x44/0xa0
    [<00000000a9f05df2>] memdup_user+0x26/0x90
    [<000000008e647779>] kvm_xen_write_hypercall_page+0xaa/0x160 [kvm]
    [<00000000e5da0818>] vmx_set_msr+0x8d3/0x1090 [kvm_intel]
    [<000000003f0226a5>] __kvm_set_msr+0x6f/0x1a0 [kvm]
    [<00000000d3dc90c4>] kvm_emulate_wrmsr+0x4b/0x120 [kvm]
    [<00000000093585d7>] vmx_handle_exit+0x1b6/0x710 [kvm_intel]
    [<000000006fa8c15e>] vcpu_run+0xfbf/0x16f0 [kvm]
    [<00000000891f7860>] kvm_arch_vcpu_ioctl_run+0x1d2/0x650 [kvm]
    [<000000001b8d2d97>] kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
    [<00000000e7aa7a58>] __x64_sys_ioctl+0x85/0xc0
    [<00000000c41da0be>] do_syscall_64+0x55/0x80
    [<000000001635e1c8>] entry_SYSCALL_64_after_hwframe+0x46/0xb0

 arch/x86/kvm/xen.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

Comments

Paul Durrant Dec. 20, 2022, 2:10 p.m. UTC | #1
On 16/12/2022 00:52, Michal Luczaj wrote:
> Release the page irrespective of the kvm_vcpu_write_guest() return value.
> 
> Signed-off-by: Michal Luczaj <mhal@rbox.co>
> ---
> # cat /sys/kernel/debug/kmemleak
> unreferenced object 0xffff888131eff000 (size 4096):
>    comm "xen_hcall_leak", pid 949, jiffies 4294753212 (age 11.943s)
>    hex dump (first 32 bytes):
>      00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
>      00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
>    backtrace:
>      [<00000000e2915da4>] __kmalloc_node_track_caller+0x44/0xa0
>      [<00000000a9f05df2>] memdup_user+0x26/0x90
>      [<000000008e647779>] kvm_xen_write_hypercall_page+0xaa/0x160 [kvm]
>      [<00000000e5da0818>] vmx_set_msr+0x8d3/0x1090 [kvm_intel]
>      [<000000003f0226a5>] __kvm_set_msr+0x6f/0x1a0 [kvm]
>      [<00000000d3dc90c4>] kvm_emulate_wrmsr+0x4b/0x120 [kvm]
>      [<00000000093585d7>] vmx_handle_exit+0x1b6/0x710 [kvm_intel]
>      [<000000006fa8c15e>] vcpu_run+0xfbf/0x16f0 [kvm]
>      [<00000000891f7860>] kvm_arch_vcpu_ioctl_run+0x1d2/0x650 [kvm]
>      [<000000001b8d2d97>] kvm_vcpu_ioctl+0x223/0x6d0 [kvm]
>      [<00000000e7aa7a58>] __x64_sys_ioctl+0x85/0xc0
>      [<00000000c41da0be>] do_syscall_64+0x55/0x80
>      [<000000001635e1c8>] entry_SYSCALL_64_after_hwframe+0x46/0xb0
> 
>   arch/x86/kvm/xen.c | 12 +++++++-----
>   1 file changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
> index f3098c0e386a..61953248bc0c 100644
> --- a/arch/x86/kvm/xen.c
> +++ b/arch/x86/kvm/xen.c
> @@ -879,6 +879,8 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
>   						 instructions, sizeof(instructions)))
>   				return 1;
>   		}
> +
> +		return 0;

I'd prefer dropping this hunk...

>   	} else {
>   		/*
>   		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
> @@ -889,6 +891,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
>   		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
>   				  : kvm->arch.xen_hvm_config.blob_size_32;
>   		u8 *page;
> +		int ret;
>   
>   		if (page_num >= blob_size)
>   			return 1;
> @@ -899,12 +902,11 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
>   		if (IS_ERR(page))
>   			return PTR_ERR(page);
>   
> -		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
> -			kfree(page);
> -			return 1;
> -		}
> +		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
> +		kfree(page);
> +
> +		return !!ret;

... making this

if (ret)
     return 1;

>   	}
> -	return 0;

... and then leaving this alone too.

   Paul

>   }
>   
>   int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
Michal Luczaj Dec. 20, 2022, 2:25 p.m. UTC | #2
On 12/20/22 15:10, Paul Durrant wrote:
> I'd prefer dropping this hunk...
> ...

Sure, sending v2.

Should I use your @xen.org or @gmail.com address for the Suggested-by tag?

thanks,
Michal
Paul Durrant Dec. 20, 2022, 3:03 p.m. UTC | #3
On 20/12/2022 14:25, Michal Luczaj wrote:
> On 12/20/22 15:10, Paul Durrant wrote:
>> I'd prefer dropping this hunk...
>> ...
> 
> Sure, sending v2.
> 
> Should I use your @xen.org or @gmail.com address for the Suggested-by tag?
> 

@xen.org

Looks like my config has lost a Reply-To somewhere along the way. I'll 
go and find it.

   Paul
diff mbox series

Patch

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index f3098c0e386a..61953248bc0c 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -879,6 +879,8 @@  int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 						 instructions, sizeof(instructions)))
 				return 1;
 		}
+
+		return 0;
 	} else {
 		/*
 		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
@@ -889,6 +891,7 @@  int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
 				  : kvm->arch.xen_hvm_config.blob_size_32;
 		u8 *page;
+		int ret;
 
 		if (page_num >= blob_size)
 			return 1;
@@ -899,12 +902,11 @@  int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
-		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
-			kfree(page);
-			return 1;
-		}
+		ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
+		kfree(page);
+
+		return !!ret;
 	}
-	return 0;
 }
 
 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)