
[v4,05/10] common/domain: allocate vmtrace_pt_buffer

Message ID 0e02c97054da6e367f740ab8d2574e2d255553c8.1593519420.git.michal.leszczynski@cert.pl
State Superseded
Series Implement support for external IPT monitoring

Commit Message

Michał Leszczyński June 30, 2020, 12:33 p.m. UTC
From: Michal Leszczynski <michal.leszczynski@cert.pl>

Allocate processor trace buffer for each vCPU when the domain
is created, deallocate trace buffers on domain destruction.

Signed-off-by: Michal Leszczynski <michal.leszczynski@cert.pl>
---
 xen/arch/x86/domain.c        | 11 +++++++++++
 xen/common/domain.c          | 32 ++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |  4 ++++
 xen/include/xen/sched.h      |  4 ++++
 4 files changed, 51 insertions(+)

Comments

Roger Pau Monné July 1, 2020, 10:38 a.m. UTC | #1
On Tue, Jun 30, 2020 at 02:33:48PM +0200, Michał Leszczyński wrote:
> From: Michal Leszczynski <michal.leszczynski@cert.pl>
> 
> Allocate processor trace buffer for each vCPU when the domain
> is created, deallocate trace buffers on domain destruction.
> 
> Signed-off-by: Michal Leszczynski <michal.leszczynski@cert.pl>
> ---
>  xen/arch/x86/domain.c        | 11 +++++++++++
>  xen/common/domain.c          | 32 ++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/domain.h |  4 ++++
>  xen/include/xen/sched.h      |  4 ++++
>  4 files changed, 51 insertions(+)
> 
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index fee6c3931a..0d79fd390c 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -2199,6 +2199,17 @@ int domain_relinquish_resources(struct domain *d)
>                  altp2m_vcpu_disable_ve(v);
>          }
>  
> +        for_each_vcpu ( d, v )
> +        {
> +            if ( !v->arch.vmtrace.pt_buf )
> +                continue;
> +
> +            vmtrace_destroy_pt(v);
> +
> +            free_domheap_pages(v->arch.vmtrace.pt_buf,
> +                get_order_from_bytes(v->domain->vmtrace_pt_size));
> +        }
> +
>          if ( is_pv_domain(d) )
>          {
>              for_each_vcpu ( d, v )
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 27dcfbac8c..8513659ef8 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -137,6 +137,31 @@ static void vcpu_destroy(struct vcpu *v)
>      free_vcpu_struct(v);
>  }
>  
> +static int vmtrace_alloc_buffers(struct vcpu *v)
> +{
> +    struct page_info *pg;
> +    uint64_t size = v->domain->vmtrace_pt_size;

IMO you would be better off just storing an order here (like it's
passed from the toolstack); that would avoid the checks and the
conversion to an order. Also, vmtrace_pt_size could be of type
unsigned int instead of uint64_t.
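
A rough sketch of how that could look (the vmtrace_pt_order field is
hypothetical and not part of this series):

    static int vmtrace_alloc_buffers(struct vcpu *v)
    {
        unsigned int order = v->domain->vmtrace_pt_order;
        struct page_info *pg;

        pg = alloc_domheap_pages(v->domain, order, MEMF_no_refcount);
        if ( !pg )
            return -ENOMEM;

        v->arch.vmtrace.pt_buf = pg;

        return 0;
    }

with the page-size / power-of-2 / 4GB sanity checks done once, where the
order is set (e.g. at domain creation), rather than per vCPU.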

> +
> +    if ( size < PAGE_SIZE || size > GB(4) || (size & (size - 1)) )
> +    {
> +        /*
> +         * We don't accept trace buffer size smaller than single page
> +         * and the upper bound is defined as 4GB in the specification.
> +         * The buffer size must be also a power of 2.
> +         */
> +        return -EINVAL;
> +    }
> +
> +    pg = alloc_domheap_pages(v->domain, get_order_from_bytes(size),
> +                             MEMF_no_refcount);
> +
> +    if ( !pg )
> +        return -ENOMEM;
> +
> +    v->arch.vmtrace.pt_buf = pg;

You can assign to pt_buf directly IMO, no need for the pg local
variable.
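
I.e. something along these lines (just illustrating the point, keeping
the existing names from the patch):

    v->arch.vmtrace.pt_buf = alloc_domheap_pages(v->domain,
                                                 get_order_from_bytes(size),
                                                 MEMF_no_refcount);
    if ( !v->arch.vmtrace.pt_buf )
        return -ENOMEM;

    return 0;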

> +    return 0;
> +}
> +
>  struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
>  {
>      struct vcpu *v;
> @@ -162,6 +187,9 @@ struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
>      v->vcpu_id = vcpu_id;
>      v->dirty_cpu = VCPU_CPU_CLEAN;
>  
> +    if ( d->vmtrace_pt_size && vmtrace_alloc_buffers(v) != 0 )
> +        return NULL;

You are leaking the allocated v here; see the other error paths further
down in the function.
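
One way to avoid the leak at this early point would be something like the
sketch below (whether freeing the vcpu struct alone is sufficient here
depends on what has already been initialised at this point):

    if ( d->vmtrace_pt_size && vmtrace_alloc_buffers(v) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }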

> +
>      spin_lock_init(&v->virq_lock);
>  
>      tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
> @@ -188,6 +216,9 @@ struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
>      if ( arch_vcpu_create(v) != 0 )
>          goto fail_sched;
>  
> +    if ( d->vmtrace_pt_size && vmtrace_init_pt(v) != 0 )
> +        goto fail_sched;
> +
>      d->vcpu[vcpu_id] = v;
>      if ( vcpu_id != 0 )
>      {
> @@ -422,6 +453,7 @@ struct domain *domain_create(domid_t domid,
>      d->shutdown_code = SHUTDOWN_CODE_INVALID;
>  
>      spin_lock_init(&d->pbuf_lock);
> +    spin_lock_init(&d->vmtrace_lock);
>  
>      rwlock_init(&d->vnuma_rwlock);
>  
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index 6fd94c2e14..b01c107f5c 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -627,6 +627,10 @@ struct arch_vcpu
>      struct {
>          bool next_interrupt_enabled;
>      } monitor;
> +
> +    struct {
> +        struct page_info *pt_buf;
> +    } vmtrace;
>  };
>  
>  struct guest_memory_policy
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index ac53519d7f..48f0a61bbd 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -457,6 +457,10 @@ struct domain
>      unsigned    pbuf_idx;
>      spinlock_t  pbuf_lock;
>  
> +    /* Used by vmtrace features */
> +    spinlock_t  vmtrace_lock;

Does this need to be per domain or rather per-vcpu? It's hard to tell
because there's no user of it in the patch.

Thanks, Roger.
Julien Grall July 1, 2020, 3:35 p.m. UTC | #2
Hi,

On 30/06/2020 13:33, Michał Leszczyński wrote:
> +static int vmtrace_alloc_buffers(struct vcpu *v)
> +{
> +    struct page_info *pg;
> +    uint64_t size = v->domain->vmtrace_pt_size;
> +
> +    if ( size < PAGE_SIZE || size > GB(4) || (size & (size - 1)) )
> +    {
> +        /*
> +         * We don't accept trace buffer size smaller than single page
> +         * and the upper bound is defined as 4GB in the specification.

This is common code, so what specification are you talking about?

I am guessing this is an Intel one, but I don't think Intel should 
dictate the common code implementation.

> +         * The buffer size must be also a power of 2.
> +         */
> +        return -EINVAL;
> +    }
> +
> +    pg = alloc_domheap_pages(v->domain, get_order_from_bytes(size),
> +                             MEMF_no_refcount);
> +
> +    if ( !pg )
> +        return -ENOMEM;
> +
> +    v->arch.vmtrace.pt_buf = pg;

v->arch.vmtrace.pt_buf is not defined on Arm. Please make sure the common 
code builds on all architectures.
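
A minimal sketch of one way to keep the common code building everywhere
would be to move the field into the common struct vcpu in
xen/include/xen/sched.h (just an illustration, not necessarily the right
split):

    struct vcpu {
        ...
        /* vmtrace buffer: allocated by common code, consumed by arch code. */
        struct {
            struct page_info *pt_buf;
        } vmtrace;
        ...
    };

with the common code then touching v->vmtrace.pt_buf rather than
v->arch.vmtrace.pt_buf.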

Cheers,

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index fee6c3931a..0d79fd390c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2199,6 +2199,17 @@  int domain_relinquish_resources(struct domain *d)
                 altp2m_vcpu_disable_ve(v);
         }
 
+        for_each_vcpu ( d, v )
+        {
+            if ( !v->arch.vmtrace.pt_buf )
+                continue;
+
+            vmtrace_destroy_pt(v);
+
+            free_domheap_pages(v->arch.vmtrace.pt_buf,
+                get_order_from_bytes(v->domain->vmtrace_pt_size));
+        }
+
         if ( is_pv_domain(d) )
         {
             for_each_vcpu ( d, v )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 27dcfbac8c..8513659ef8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -137,6 +137,31 @@  static void vcpu_destroy(struct vcpu *v)
     free_vcpu_struct(v);
 }
 
+static int vmtrace_alloc_buffers(struct vcpu *v)
+{
+    struct page_info *pg;
+    uint64_t size = v->domain->vmtrace_pt_size;
+
+    if ( size < PAGE_SIZE || size > GB(4) || (size & (size - 1)) )
+    {
+        /*
+         * We don't accept trace buffer size smaller than single page
+         * and the upper bound is defined as 4GB in the specification.
+         * The buffer size must be also a power of 2.
+         */
+        return -EINVAL;
+    }
+
+    pg = alloc_domheap_pages(v->domain, get_order_from_bytes(size),
+                             MEMF_no_refcount);
+
+    if ( !pg )
+        return -ENOMEM;
+
+    v->arch.vmtrace.pt_buf = pg;
+    return 0;
+}
+
 struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
 {
     struct vcpu *v;
@@ -162,6 +187,9 @@  struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
     v->vcpu_id = vcpu_id;
     v->dirty_cpu = VCPU_CPU_CLEAN;
 
+    if ( d->vmtrace_pt_size && vmtrace_alloc_buffers(v) != 0 )
+        return NULL;
+
     spin_lock_init(&v->virq_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
@@ -188,6 +216,9 @@  struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
     if ( arch_vcpu_create(v) != 0 )
         goto fail_sched;
 
+    if ( d->vmtrace_pt_size && vmtrace_init_pt(v) != 0 )
+        goto fail_sched;
+
     d->vcpu[vcpu_id] = v;
     if ( vcpu_id != 0 )
     {
@@ -422,6 +453,7 @@  struct domain *domain_create(domid_t domid,
     d->shutdown_code = SHUTDOWN_CODE_INVALID;
 
     spin_lock_init(&d->pbuf_lock);
+    spin_lock_init(&d->vmtrace_lock);
 
     rwlock_init(&d->vnuma_rwlock);
 
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 6fd94c2e14..b01c107f5c 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -627,6 +627,10 @@  struct arch_vcpu
     struct {
         bool next_interrupt_enabled;
     } monitor;
+
+    struct {
+        struct page_info *pt_buf;
+    } vmtrace;
 };
 
 struct guest_memory_policy
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ac53519d7f..48f0a61bbd 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -457,6 +457,10 @@  struct domain
     unsigned    pbuf_idx;
     spinlock_t  pbuf_lock;
 
+    /* Used by vmtrace features */
+    spinlock_t  vmtrace_lock;
+    uint64_t    vmtrace_pt_size;
+
     /* OProfile support. */
     struct xenoprof *xenoprof;