diff mbox

[v2,05/22] KVM: x86: abstract the operation for read/write emulation

Message ID 4E01FC78.6060000@cn.fujitsu.com (mailing list archive)
State New, archived
Headers show

Commit Message

Xiao Guangrong June 22, 2011, 2:30 p.m. UTC
The operations of read emulation and write emulation are very similar, so we
can abstract the operation of them; in a later patch, it is used to clean up the
same code

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/x86.c |   72 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 72 insertions(+), 0 deletions(-)

Comments

Avi Kivity June 29, 2011, 8:37 a.m. UTC | #1
On 06/22/2011 05:30 PM, Xiao Guangrong wrote:
> The operations of read emulation and write emulation are very similar, so we
> can abstract the operation of them; in a later patch, it is used to clean up the
> same code
>
> Signed-off-by: Xiao Guangrong<xiaoguangrong@cn.fujitsu.com>
> ---
>   arch/x86/kvm/x86.c |   72 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>   1 files changed, 72 insertions(+), 0 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c29ef96..887714f 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4056,6 +4056,78 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
>   	return 1;
>   }
>
> +struct read_write_emulator_ops {
> +	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
> +				  int bytes);
> +	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
> +				  void *val, int bytes);
> +	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
> +			       int bytes, void *val);
> +	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
> +				    void *val, int bytes);
> +	bool write;
> +};


Interesting!

This structure combines two unrelated operations, though.  One is the 
internals of the iteration on a virtual address that is split to various 
physical addresses.  The other is the interaction with userspace on mmio 
exits.  They should be split, but I think it's fine to do it in a later 
patch.  This series is long enough already.

I was also annoyed by the duplication.  The way I thought of fixing it 
is having gva_to_gpa() return two gpas, and having the access function 
accept gpa vectors.  The reason was so that we can implement locked 
cross-page operations (which we now emulate as unlocked writes).

But I think we can do without it, and instead emulate locked cross-page 
ops by stalling all other vcpus while we write, or by unmapping the 
pages involved.  It isn't pretty but it doesn't need to be fast since 
it's a very rare operation.  So I think we can go with your approach.
Xiao Guangrong June 29, 2011, 10:59 a.m. UTC | #2
On 06/29/2011 04:37 PM, Avi Kivity wrote:

>> +struct read_write_emulator_ops {
>> +    int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
>> +                  int bytes);
>> +    int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
>> +                  void *val, int bytes);
>> +    int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
>> +                   int bytes, void *val);
>> +    int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
>> +                    void *val, int bytes);
>> +    bool write;
>> +};
> 
> 
> Interesting!
> 
> This structure combines two unrelated operations, though.  One is the internals of the iteration on a virtual address that is split to various physical addresses.  The other is the interaction with userspace on mmio exits.  They should be split, but I think it's fine to do it in a later patch.  This series is long enough already.
> 
> I was also annoyed by the duplication.  The way I thought of fixing it is having gva_to_gpa() return two gpas, and having the access function accept gpa vectors.  The reason was so that we can implement locked cross-page operations (which we now emulate as unlocked writes).
> 
> But I think we can do without it, and instead emulate locked cross-page ops by stalling all other vcpus while we write, or by unmapping the pages involved.  It isn't pretty but it doesn't need to be fast since it's a very rare operation.  So I think we can go with your approach.
> 

OK, i'll post it in the separate patchset, thanks, Avi.
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c29ef96..887714f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4056,6 +4056,78 @@  int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	return 1;
 }
 
+struct read_write_emulator_ops {
+	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
+				  int bytes);
+	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
+				  void *val, int bytes);
+	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       int bytes, void *val);
+	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
+				    void *val, int bytes);
+	bool write;
+};
+
+static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
+{
+	if (vcpu->mmio_read_completed) {
+		memcpy(val, vcpu->mmio_data, bytes);
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
+			       vcpu->mmio_phys_addr, *(u64 *)val);
+		vcpu->mmio_read_completed = 0;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
+			void *val, int bytes)
+{
+	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
+}
+
+static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
+			 void *val, int bytes)
+{
+	return emulator_write_phys(vcpu, gpa, val, bytes);
+}
+
+static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
+{
+	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+	return vcpu_mmio_write(vcpu, gpa, bytes, val);
+}
+
+static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
+			  void *val, int bytes)
+{
+	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+	return X86EMUL_IO_NEEDED;
+}
+
+static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
+			   void *val, int bytes)
+{
+	memcpy(vcpu->mmio_data, val, bytes);
+	memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
+	return X86EMUL_CONTINUE;
+}
+
+static struct read_write_emulator_ops read_emultor = {
+	.read_write_prepare = read_prepare,
+	.read_write_emulate = read_emulate,
+	.read_write_mmio = vcpu_mmio_read,
+	.read_write_exit_mmio = read_exit_mmio,
+};
+
+static struct read_write_emulator_ops write_emultor = {
+	.read_write_emulate = write_emulate,
+	.read_write_mmio = write_mmio,
+	.read_write_exit_mmio = write_exit_mmio,
+	.write = true,
+};
+
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,