diff mbox

[2/6] x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()

Message ID 56D8761302000078000D8F46@prv-mh.provo.novell.com (mailing list archive)
State New, archived
Headers show

Commit Message

Jan Beulich March 3, 2016, 4:36 p.m. UTC
Its return value can easily serve the purpose. We cannot, however,
return unspecific "success" anymore for a domain of the wrong type -
since no caller exists that would call this for PV domains, simply add
an ASSERT().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
x86/HVM: remove unnecessary indirection from hvm_get_mem_pinned_cacheattr()

Its return value can easily serve the purpose. We cannot, however,
return unspecific "success" anymore for a domain of the wrong type -
since no caller exists that would call this for PV domains, simply add
an ASSERT().

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -546,17 +546,13 @@ void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type)
+    unsigned int order)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
     uint64_t mask = ~(uint64_t)0 << order;
-    int rc = 0;
+    int rc = -ENXIO;
 
-    *type = ~0;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
+    ASSERT(has_hvm_container_domain(d));
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
@@ -566,14 +562,13 @@ int hvm_get_mem_pinned_cacheattr(
         if ( ((guest_fn & mask) >= range->start) &&
              ((guest_fn | ~mask) <= range->end) )
         {
-            *type = range->type;
-            rc = 1;
+            rc = range->type;
             break;
         }
         if ( ((guest_fn & mask) <= range->end) &&
              (range->start <= (guest_fn | ~mask)) )
         {
-            rc = -1;
+            rc = -EADDRNOTAVAIL;
             break;
         }
     }
@@ -762,7 +757,6 @@ int epte_get_entry_emt(struct domain *d,
                        unsigned int order, uint8_t *ipat, bool_t direct_mmio)
 {
     int gmtrr_mtype, hmtrr_mtype;
-    uint32_t type;
     struct vcpu *v = current;
 
     *ipat = 0;
@@ -798,14 +792,15 @@ int epte_get_entry_emt(struct domain *d,
         return MTRR_TYPE_WRBACK;
     }
 
-    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+    if ( gmtrr_mtype >= 0 )
     {
-    case 1:
         *ipat = 1;
-        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
-    case -1:
-        return -1;
+        return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
+                                                : MTRR_TYPE_UNCACHABLE;
     }
+    if ( gmtrr_mtype == -EADDRNOTAVAIL )
+        return -1;
 
     gmtrr_mtype = is_hvm_domain(d) && v ?
                   get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -607,7 +607,7 @@ _sh_propagate(struct vcpu *v,
     if ( (level == 1) && is_hvm_domain(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
-        unsigned int type;
+        int type;
 
         ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
 
@@ -619,7 +619,8 @@ _sh_propagate(struct vcpu *v,
          *    gMTRR and gPAT.
          */
         if ( !mmio_mfn &&
-             hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
+             (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
+                                                  0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
--- a/xen/include/asm-x86/hvm/cacheattr.h
+++ b/xen/include/asm-x86/hvm/cacheattr.h
@@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type);
+    unsigned int order);
 
 
 /* Set pinned caching type for a domain. */

Comments

Wei Liu March 3, 2016, 4:59 p.m. UTC | #1
On Thu, Mar 03, 2016 at 09:36:19AM -0700, Jan Beulich wrote:
[...]
>              sflags |= pat_type_2_pte_flags(type);
>          else if ( d->arch.hvm_domain.is_in_uc_mode )
>              sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
> --- a/xen/include/asm-x86/hvm/cacheattr.h
> +++ b/xen/include/asm-x86/hvm/cacheattr.h
> @@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
>  int hvm_get_mem_pinned_cacheattr(
>      struct domain *d,
>      uint64_t guest_fn,
> -    unsigned int order,
> -    uint32_t *type);
> +    unsigned int order);
>  

You seem to have forgotten to update the comment for this function as
you did in the previous patch.

Wei.
Wei Liu March 3, 2016, 5:07 p.m. UTC | #2
On Thu, Mar 03, 2016 at 04:59:22PM +0000, Wei Liu wrote:
> On Thu, Mar 03, 2016 at 09:36:19AM -0700, Jan Beulich wrote:
> [...]
> >              sflags |= pat_type_2_pte_flags(type);
> >          else if ( d->arch.hvm_domain.is_in_uc_mode )
> >              sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
> > --- a/xen/include/asm-x86/hvm/cacheattr.h
> > +++ b/xen/include/asm-x86/hvm/cacheattr.h
> > @@ -15,8 +15,7 @@ void hvm_destroy_cacheattr_region_list(
> >  int hvm_get_mem_pinned_cacheattr(
> >      struct domain *d,
> >      uint64_t guest_fn,
> > -    unsigned int order,
> > -    uint32_t *type);
> > +    unsigned int order);
> >  
> 
> You seem to have forgotten to update the comment for this function as
> you did in previous patch.

Oh well, the updated comment went into the final patch of this series.

Wei.
Andrew Cooper March 3, 2016, 5:35 p.m. UTC | #3
On 03/03/16 16:36, Jan Beulich wrote:
> Its return value can easily serve the purpose. We cannot, however,
> return unspecific "success" anymore for a domain of the wrong type -
> since no caller exists that would call this for PV domains, simply add
> an ASSERT().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
diff mbox

Patch

--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -546,17 +546,13 @@  void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type)
+    unsigned int order)
 {
     struct hvm_mem_pinned_cacheattr_range *range;
     uint64_t mask = ~(uint64_t)0 << order;
-    int rc = 0;
+    int rc = -ENXIO;
 
-    *type = ~0;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
+    ASSERT(has_hvm_container_domain(d));
 
     rcu_read_lock(&pinned_cacheattr_rcu_lock);
     list_for_each_entry_rcu ( range,
@@ -566,14 +562,13 @@  int hvm_get_mem_pinned_cacheattr(
         if ( ((guest_fn & mask) >= range->start) &&
              ((guest_fn | ~mask) <= range->end) )
         {
-            *type = range->type;
-            rc = 1;
+            rc = range->type;
             break;
         }
         if ( ((guest_fn & mask) <= range->end) &&
              (range->start <= (guest_fn | ~mask)) )
         {
-            rc = -1;
+            rc = -EADDRNOTAVAIL;
             break;
         }
     }
@@ -762,7 +757,6 @@  int epte_get_entry_emt(struct domain *d,
                        unsigned int order, uint8_t *ipat, bool_t direct_mmio)
 {
     int gmtrr_mtype, hmtrr_mtype;
-    uint32_t type;
     struct vcpu *v = current;
 
     *ipat = 0;
@@ -798,14 +792,15 @@  int epte_get_entry_emt(struct domain *d,
         return MTRR_TYPE_WRBACK;
     }
 
-    switch ( hvm_get_mem_pinned_cacheattr(d, gfn, order, &type) )
+    gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+    if ( gmtrr_mtype >= 0 )
     {
-    case 1:
         *ipat = 1;
-        return type != PAT_TYPE_UC_MINUS ? type : PAT_TYPE_UNCACHABLE;
-    case -1:
-        return -1;
+        return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
+                                                : MTRR_TYPE_UNCACHABLE;
     }
+    if ( gmtrr_mtype == -EADDRNOTAVAIL )
+        return -1;
 
     gmtrr_mtype = is_hvm_domain(d) && v ?
                   get_mtrr_type(&v->arch.hvm_vcpu.mtrr,
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -607,7 +607,7 @@  _sh_propagate(struct vcpu *v,
     if ( (level == 1) && is_hvm_domain(d) &&
          !is_xen_heap_mfn(mfn_x(target_mfn)) )
     {
-        unsigned int type;
+        int type;
 
         ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
 
@@ -619,7 +619,8 @@  _sh_propagate(struct vcpu *v,
          *    gMTRR and gPAT.
          */
         if ( !mmio_mfn &&
-             hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), 0, &type) )
+             (type = hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn),
+                                                  0)) >= 0 )
             sflags |= pat_type_2_pte_flags(type);
         else if ( d->arch.hvm_domain.is_in_uc_mode )
             sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
--- a/xen/include/asm-x86/hvm/cacheattr.h
+++ b/xen/include/asm-x86/hvm/cacheattr.h
@@ -15,8 +15,7 @@  void hvm_destroy_cacheattr_region_list(
 int hvm_get_mem_pinned_cacheattr(
     struct domain *d,
     uint64_t guest_fn,
-    unsigned int order,
-    uint32_t *type);
+    unsigned int order);
 
 
 /* Set pinned caching type for a domain. */