--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -47,7 +47,7 @@
/* Per-CPU variable for enforcing the lock ordering */
DEFINE_PER_CPU(int, mm_lock_level);
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/************************************************/
/*              LOG DIRTY SUPPORT               */
@@ -630,7 +630,7 @@ void paging_log_dirty_init(struct domain
d->arch.paging.log_dirty.ops = ops;
}
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/************************************************/
/*           CODE FOR PAGING SUPPORT            */
@@ -671,7 +671,7 @@ void paging_vcpu_init(struct vcpu *v)
shadow_vcpu_init(v);
}
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
bool_t resuming)
@@ -792,7 +792,7 @@ long paging_domctl_continuation(XEN_GUES
return ret;
}
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
@@ -808,7 +808,7 @@ int paging_teardown(struct domain *d)
if ( preempted )
return -ERESTART;
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/* clean up log dirty resources. */
rc = paging_free_log_dirty_bitmap(d, 0);
if ( rc == -ERESTART )
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2869,12 +2869,14 @@ void shadow_teardown(struct domain *d, b
* calls now that we've torn down the bitmap */
d->arch.paging.mode &= ~PG_log_dirty;
- if ( d->arch.hvm.dirty_vram )
+#ifdef CONFIG_HVM
+ if ( is_hvm_domain(d) && d->arch.hvm.dirty_vram )
{
xfree(d->arch.hvm.dirty_vram->sl1ma);
xfree(d->arch.hvm.dirty_vram->dirty_bitmap);
XFREE(d->arch.hvm.dirty_vram);
}
+#endif
out:
paging_unlock(d);
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -618,6 +618,7 @@ _sh_propagate(struct vcpu *v,
}
}
+#ifdef CONFIG_HVM
if ( unlikely(level == 1) && is_hvm_domain(d) )
{
struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
@@ -632,6 +633,7 @@ _sh_propagate(struct vcpu *v,
sflags &= ~_PAGE_RW;
}
}
+#endif
/* Read-only memory */
if ( p2m_is_readonly(p2mt) )
@@ -1050,6 +1052,7 @@ static inline void shadow_vram_get_l1e(s
mfn_t sl1mfn,
struct domain *d)
{
+#ifdef CONFIG_HVM
mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
int flags = shadow_l1e_get_flags(new_sl1e);
unsigned long gfn;
@@ -1074,6 +1077,7 @@ static inline void shadow_vram_get_l1e(s
dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn)
| ((unsigned long)sl1e & ~PAGE_MASK);
}
+#endif
}
static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
@@ -1081,6 +1085,7 @@ static inline void shadow_vram_put_l1e(s
mfn_t sl1mfn,
struct domain *d)
{
+#ifdef CONFIG_HVM
mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
int flags = shadow_l1e_get_flags(old_sl1e);
unsigned long gfn;
@@ -1140,6 +1145,7 @@ static inline void shadow_vram_put_l1e(s
dirty_vram->last_dirty = NOW();
}
}
+#endif
}
static int shadow_set_l1e(struct domain *d,
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -67,8 +67,12 @@
#define PG_translate 0
#define PG_external 0
#endif
+#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE)
/* Enable log dirty mode */
#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift)
+#else
+#define PG_log_dirty 0
+#endif
/* All paging modes. */
#define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external)
@@ -154,7 +158,7 @@ struct paging_mode {
/*****************************************************************************
* Log dirty code */
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/* get the dirty bitmap for a specific range of pfns */
void paging_log_dirty_range(struct domain *d,
@@ -195,23 +199,28 @@ int paging_mfn_is_dirty(struct domain *d
#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
(LOGDIRTY_NODE_ENTRIES-1))
+#ifdef CONFIG_HVM
/* VRAM dirty tracking support */
struct sh_dirty_vram {
unsigned long begin_pfn;
unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
paddr_t *sl1ma;
uint8_t *dirty_bitmap;
s_time_t last_dirty;
+#endif
};
+#endif
-#else /* !CONFIG_PV_SHIM_EXCLUSIVE */
+#else /* !PG_log_dirty */
static inline void paging_log_dirty_init(struct domain *d,
const struct log_dirty_ops *ops) {}
static inline void paging_mark_dirty(struct domain *d, mfn_t gmfn) {}
static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) {}
+static inline bool paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { return false; }
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/*****************************************************************************
* Entry points into the paging-assistance code */
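
The conversion works because PG_log_dirty now does double duty: it is
still the paging-mode bit when log-dirty support is built (CONFIG_HVM,
or any non-shim-exclusive configuration), but it collapses to a literal
0 otherwise, so "#if PG_log_dirty" drops the guarded code at
preprocessing time and paging_mode_log_dirty() constant-folds to false
in the callers that remain. Below is a minimal standalone sketch of
that idiom, not part of the patch: PG_mode_shift's value, struct
domain's layout, and mark_dirty() are illustrative stand-ins, not the
real Xen definitions.

#include <stdbool.h>
#include <stdio.h>

#define PG_mode_shift 10                /* stand-in value */

#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE)
/* Stand-in for XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift. */
#define PG_log_dirty (1u << PG_mode_shift)
#else
#define PG_log_dirty 0
#endif

struct domain { unsigned int paging_mode; };    /* illustrative only */

/* With PG_log_dirty == 0 this constant-folds to false, so the compiler
 * can eliminate the log-dirty paths in callers as dead code. */
static bool paging_mode_log_dirty(const struct domain *d)
{
    return d->paging_mode & PG_log_dirty;
}

#if PG_log_dirty
static void mark_dirty(struct domain *d) { d->paging_mode |= PG_log_dirty; }
#else
static void mark_dirty(struct domain *d) { (void)d; /* compiled out */ }
#endif

int main(void)
{
    struct domain d = { 0 };

    mark_dirty(&d);
    printf("log-dirty active: %d\n", paging_mode_log_dirty(&d));

    return 0;
}

Compiling with -DCONFIG_PV_SHIM_EXCLUSIVE and without -DCONFIG_HVM
exercises the stubbed configuration (prints 0); any other combination
keeps the real code (prints 1).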