[v8,6/8] KVM: Update lpage info when private/shared memory are mixed

Message ID 20220915142913.2213336-7-chao.p.peng@linux.intel.com (mailing list archive)
State New
Series KVM: mm: fd-based approach for supporting KVM

Commit Message

Chao Peng Sept. 15, 2022, 2:29 p.m. UTC
When private and shared memory are mixed within a large page, the
lpage_info may not be accurate and should be updated with this mixed
info. A large page that contains mixed pages can't really be mapped as
a large page, since its private and shared pages come from different
physical memory.

This patch updates lpage_info when the private/shared memory attribute
is changed.  If both private and shared pages are present within a
large page region, it can't be mapped as a large page. It is a bit
challenging to track the mixed info in a 'count'-like variable, so this
patch instead reserves a bit in disallow_lpage to indicate that a large
page includes mixed private/shared pages.

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |   8 +++
 arch/x86/kvm/mmu/mmu.c          | 119 +++++++++++++++++++++++++++++++-
 arch/x86/kvm/x86.c              |   2 +
 include/linux/kvm_host.h        |  17 +++++
 virt/kvm/kvm_main.c             |  11 ++-
 5 files changed, 154 insertions(+), 3 deletions(-)
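
As a reference for how this patch carves up disallow_lpage (the two #defines
below are taken from the patch; the two accessor helpers are only an
illustrative sketch, not part of the patch):

#define KVM_LPAGE_PRIVATE_SHARED_MIXED		(1U << 31)
#define KVM_LPAGE_COUNT_MAX			((1U << 31) - 1)

/* Illustrative helpers: bit 31 is the mixed flag, bits 0-30 keep the
 * existing disallow_lpage reference count. */
static inline bool lpage_is_mixed(struct kvm_lpage_info *linfo)
{
	return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
}

static inline int lpage_disallow_count(struct kvm_lpage_info *linfo)
{
	return linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
}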

Comments

Isaku Yamahata Sept. 29, 2022, 4:52 p.m. UTC | #1
On Thu, Sep 15, 2022 at 10:29:11PM +0800,
Chao Peng <chao.p.peng@linux.intel.com> wrote:

> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 08abad4f3e6f..a0f198cede3d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
...
> @@ -6894,3 +6899,115 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
>  	if (kvm->arch.nx_lpage_recovery_thread)
>  		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
>  }
> +
> +static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
> +			      gfn_t start, gfn_t end)
> +{
> +	XA_STATE(xas, &kvm->mem_attr_array, start);
> +	gfn_t gfn = start;
> +	void *entry;
> +	bool shared, private;
> +	bool mixed = false;
> +
> +	if (attr == KVM_MEM_ATTR_SHARED) {
> +		shared = true;
> +		private = false;
> +	} else {
> +		shared = false;
> +		private = true;
> +	}

We don't have to care whether the target is shared or private.  We only
need to check whether the entries are the same or not.
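
I.e., roughly this (an illustrative sketch only, using per-gfn xa_load()
instead of the patch's XA_STATE walk):

static bool range_attr_is_mixed(struct kvm *kvm, gfn_t start, gfn_t end)
{
	/* "Mixed" just means the entries in [start, end) are not all equal. */
	void *first = xa_load(&kvm->mem_attr_array, start);
	gfn_t gfn;

	for (gfn = start + 1; gfn < end; gfn++)
		if (xa_load(&kvm->mem_attr_array, gfn) != first)
			return true;

	return false;
}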

> +
> +	rcu_read_lock();
> +	entry = xas_load(&xas);
> +	while (gfn < end) {
> +		if (xas_retry(&xas, entry))
> +			continue;
> +
> +		KVM_BUG_ON(gfn != xas.xa_index, kvm);
> +
> +		if (entry)
> +			private = true;
> +		else
> +			shared = true;
> +
> +		if (private && shared) {
> +			mixed = true;
> +			goto out;
> +		}
> +
> +		entry = xas_next(&xas);
> +		gfn++;
> +	}
> +out:
> +	rcu_read_unlock();
> +	return mixed;
> +}
> +
> +static inline void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
> +{
> +	if (mixed)
> +		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
> +	else
> +		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
> +}
> +
> +static void update_mem_lpage_info(struct kvm *kvm,
> +				  struct kvm_memory_slot *slot,
> +				  unsigned int attr,
> +				  gfn_t start, gfn_t end)
> +{
> +	unsigned long lpage_start, lpage_end;
> +	unsigned long gfn, pages, mask;
> +	int level;
> +
> +	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> +		pages = KVM_PAGES_PER_HPAGE(level);
> +		mask = ~(pages - 1);
> +		lpage_start = start & mask;
> +		lpage_end = (end - 1) & mask;
> +
> +		/*
> +		 * We only need to scan the head and tail page, for middle pages
> +		 * we know they are not mixed.
> +		 */
> +		update_mixed(lpage_info_slot(lpage_start, slot, level),
> +			     mem_attr_is_mixed(kvm, attr, lpage_start,
> +							  lpage_start + pages));
> +
> +		if (lpage_start == lpage_end)
> +			return;
> +
> +		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
> +			update_mixed(lpage_info_slot(gfn, slot, level), false);


For the >2M case, we don't have to check every entry; just check the lower
level's result.

> +
> +		update_mixed(lpage_info_slot(lpage_end, slot, level),
> +			     mem_attr_is_mixed(kvm, attr, lpage_end,
> +							  lpage_end + pages));
> +	}
> +}
> +
> +void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
> +			      gfn_t start, gfn_t end)
> +{
> +	struct kvm_memory_slot *slot;
> +	struct kvm_memslots *slots;
> +	struct kvm_memslot_iter iter;
> +	int i;
> +
> +	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
> +			"Unsupported mem attribute.\n");
> +
> +	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> +		slots = __kvm_memslots(kvm, i);
> +
> +		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
> +			slot = iter.slot;
> +			start = max(start, slot->base_gfn);
> +			end = min(end, slot->base_gfn + slot->npages);
> +			if (WARN_ON_ONCE(start >= end))
> +				continue;
> +
> +			update_mem_lpage_info(kvm, slot, attr, start, end);
> +		}
> +	}
> +}


Here is my updated version.

bool kvm_mem_attr_is_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level)
{
	gfn_t pages = KVM_PAGES_PER_HPAGE(level);
	gfn_t mask = ~(pages - 1);
	struct kvm_lpage_info *linfo = lpage_info_slot(gfn & mask, slot, level);

	WARN_ON_ONCE(level == PG_LEVEL_4K);
	return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
}

#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM_ATTR
static void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
{
	if (mixed)
		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
	else
		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
}

static bool __mem_attr_is_mixed(struct kvm *kvm, gfn_t start, gfn_t end)
{
	XA_STATE(xas, &kvm->mem_attr_array, start);
	bool mixed = false;
	gfn_t gfn = start;
	void *s_entry;
	void *entry;

	rcu_read_lock();
	s_entry = xas_load(&xas);
	entry = s_entry;
	while (gfn < end) {
		if (xas_retry(&xas, entry))
			continue;

		KVM_BUG_ON(gfn != xas.xa_index, kvm);

		entry = xas_next(&xas);
		if (entry != s_entry) {
			mixed = true;
			break;
		}
		gfn++;
	}
	rcu_read_unlock();
	return mixed;
}

static bool mem_attr_is_mixed(struct kvm *kvm,
			      struct kvm_memory_slot *slot, int level,
			      gfn_t start, gfn_t end)
{
	struct kvm_lpage_info *child_linfo;
	unsigned long child_pages;
	bool mixed = false;
	unsigned long gfn;
	void *entry;

	if (WARN_ON_ONCE(level == PG_LEVEL_4K))
		return false;

	if (level == PG_LEVEL_2M)
		return __mem_attr_is_mixed(kvm, start, end);

	/* This assumes that level - 1 is already updated. */
	rcu_read_lock();
	child_pages = KVM_PAGES_PER_HPAGE(level - 1);
	entry = xa_load(&kvm->mem_attr_array, start);
	for (gfn = start; gfn < end; gfn += child_pages) {
		child_linfo = lpage_info_slot(gfn, slot, level - 1);
		if (child_linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED) {
			mixed = true;
			break;
		}
		if (xa_load(&kvm->mem_attr_array, gfn) != entry) {
			mixed = true;
			break;
		}
	}
	rcu_read_unlock();
	return mixed;
}

static void update_mem_lpage_info(struct kvm *kvm,
				  struct kvm_memory_slot *slot,
				  unsigned int attr,
				  gfn_t start, gfn_t end)
{
	unsigned long lpage_start, lpage_end;
	unsigned long gfn, pages, mask;
	int level;

	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
		pages = KVM_PAGES_PER_HPAGE(level);
		mask = ~(pages - 1);
		lpage_start = start & mask;
		lpage_end = (end - 1) & mask;

		/*
		 * We only need to scan the head and tail page, for middle pages
		 * we know they are not mixed.
		 */
		update_mixed(lpage_info_slot(lpage_start, slot, level),
			     mem_attr_is_mixed(kvm, slot, level,
					       lpage_start, lpage_start + pages));

		if (lpage_start == lpage_end)
			return;

		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
			update_mixed(lpage_info_slot(gfn, slot, level), false);

		update_mixed(lpage_info_slot(lpage_end, slot, level),
			     mem_attr_is_mixed(kvm, slot, level,
					       lpage_end, lpage_end + pages));
	}
}

void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
			      gfn_t start, gfn_t end)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	struct kvm_memslot_iter iter;
	int idx;
	int i;

	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
		  "Unsupported mem attribute.\n");

	idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
			slot = iter.slot;
			start = max(start, slot->base_gfn);
			end = min(end, slot->base_gfn + slot->npages);
			if (WARN_ON_ONCE(start >= end))
				continue;

			update_mem_lpage_info(kvm, slot, attr, start, end);
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
#endif
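
For context, one hypothetical consumer of kvm_mem_attr_is_mixed() (not part
of this series; the function name and shape are made up for illustration):
walking down from the requested level until the enclosing large page is no
longer mixed would cap the mapping level.

static int max_level_for_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
			     int max_level)
{
	int level;

	/* Stop at the first level whose enclosing large page is not mixed. */
	for (level = max_level; level > PG_LEVEL_4K; level--)
		if (!kvm_mem_attr_is_mixed(slot, gfn, level))
			break;

	return level;
}
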
Chao Peng Sept. 30, 2022, 8:59 a.m. UTC | #2
On Thu, Sep 29, 2022 at 09:52:06AM -0700, Isaku Yamahata wrote:
> On Thu, Sep 15, 2022 at 10:29:11PM +0800,
> Chao Peng <chao.p.peng@linux.intel.com> wrote:
> 
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 08abad4f3e6f..a0f198cede3d 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> ...
> > @@ -6894,3 +6899,115 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
> >  	if (kvm->arch.nx_lpage_recovery_thread)
> >  		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
> >  }
> > +
> > +static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
> > +			      gfn_t start, gfn_t end)
> > +{
> > +	XA_STATE(xas, &kvm->mem_attr_array, start);
> > +	gfn_t gfn = start;
> > +	void *entry;
> > +	bool shared, private;
> > +	bool mixed = false;
> > +
> > +	if (attr == KVM_MEM_ATTR_SHARED) {
> > +		shared = true;
> > +		private = false;
> > +	} else {
> > +		shared = false;
> > +		private = true;
> > +	}
> 
> We don't have to care whether the target is shared or private.  We only
> need to check whether the entries are the same or not.

There is an optimization opportunity if we know which attribute we are going
to set: we can return 'mixed = true' as soon as we find the first entry with
the opposite attribute, i.e. it's unnecessary to check the attribute of every
child page within a large page before reaching a conclusion.

After a further look, the code can be refined as below:

--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7255,17 +7255,9 @@ static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
 	XA_STATE(xas, &kvm->mem_attr_array, start);
 	gfn_t gfn = start;
 	void *entry;
-	bool shared, private;
+	bool shared = attr == KVM_MEM_ATTR_SHARED;
 	bool mixed = false;
 
-	if (attr == KVM_MEM_ATTR_SHARED) {
-		shared = true;
-		private = false;
-	} else {
-		shared = false;
-		private = true;
-	}
-
 	rcu_read_lock();
 	entry = xas_load(&xas);
 	while (gfn < end) {
@@ -7274,12 +7266,7 @@ static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
 
 		KVM_BUG_ON(gfn != xas.xa_index, kvm);
 
-		if (entry)
-			private = true;
-		else
-			shared = true;
-
-		if (private && shared) {
+		if ((entry && !shared) || (!entry && shared)) {
 			mixed = true;
 			goto out;
 		}
@@ -7320,8 +7307,7 @@ static void update_mem_lpage_info(struct kvm *kvm,
 		 * we know they are not mixed.
 		 */
 		update_mixed(lpage_info_slot(lpage_start, slot, level),
-			     mem_attr_is_mixed(kvm, attr, lpage_start,
-							  lpage_start + pages));
+			     mem_attr_is_mixed(kvm, attr, lpage_start, start));
 
 		if (lpage_start == lpage_end)
 			return;
@@ -7330,7 +7316,7 @@ static void update_mem_lpage_info(struct kvm *kvm,
 			update_mixed(lpage_info_slot(gfn, slot, level), false);
 
 		update_mixed(lpage_info_slot(lpage_end, slot, level),
-			     mem_attr_is_mixed(kvm, attr, lpage_end,
+			     mem_attr_is_mixed(kvm, attr, end,
 							  lpage_end + pages));
 	}
 }
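
To make the narrowed scan ranges concrete, a worked example (numbers are
illustrative only):

/*
 * 2M level, 512 gfns per large page, setting [start = 200, end = 1000)
 * to one attribute:
 *
 *   head large page [0, 512):    [200, 512) now uniformly carries the
 *   new attribute, so only [lpage_start = 0, start = 200) can still
 *   hold the opposite attribute -- scan just that part.
 *
 *   tail large page [512, 1024): [512, 1000) is uniform for the same
 *   reason, so only [end = 1000, lpage_end + pages = 1024) needs
 *   scanning.
 */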
> > +		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
> > +			update_mixed(lpage_info_slot(gfn, slot, level), false);
> 
> 
> For the >2M case, we don't have to check every entry; just check the lower
> level's result.

Sounds good, we can reduce some scanning.

Thanks,
Chao

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfad6ba1a70a..85119ed9527a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,6 +38,7 @@ 
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 #define __KVM_HAVE_ZAP_GFN_RANGE
+#define __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
 
 #define KVM_MAX_VCPUS 1024
 
@@ -945,6 +946,13 @@  struct kvm_vcpu_arch {
 #endif
 };
 
+/*
+ * Use a bit in disallow_lpage to indicate private/shared pages are mixed at
+ * the level. The remaining bits are used as a reference count by other users.
+ */
+#define KVM_LPAGE_PRIVATE_SHARED_MIXED		(1U << 31)
+#define KVM_LPAGE_COUNT_MAX			((1U << 31) - 1)
+
 struct kvm_lpage_info {
 	int disallow_lpage;
 };
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 08abad4f3e6f..a0f198cede3d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -762,11 +762,16 @@  static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
 {
 	struct kvm_lpage_info *linfo;
 	int i;
+	int disallow_count;
 
 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
+
+		disallow_count = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
+		WARN_ON(disallow_count + count < 0 ||
+			disallow_count > KVM_LPAGE_COUNT_MAX - count);
+
 		linfo->disallow_lpage += count;
-		WARN_ON(linfo->disallow_lpage < 0);
 	}
 }
 
@@ -6894,3 +6899,115 @@  void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 	if (kvm->arch.nx_lpage_recovery_thread)
 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
 }
+
+static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	XA_STATE(xas, &kvm->mem_attr_array, start);
+	gfn_t gfn = start;
+	void *entry;
+	bool shared, private;
+	bool mixed = false;
+
+	if (attr == KVM_MEM_ATTR_SHARED) {
+		shared = true;
+		private = false;
+	} else {
+		shared = false;
+		private = true;
+	}
+
+	rcu_read_lock();
+	entry = xas_load(&xas);
+	while (gfn < end) {
+		if (xas_retry(&xas, entry))
+			continue;
+
+		KVM_BUG_ON(gfn != xas.xa_index, kvm);
+
+		if (entry)
+			private = true;
+		else
+			shared = true;
+
+		if (private && shared) {
+			mixed = true;
+			goto out;
+		}
+
+		entry = xas_next(&xas);
+		gfn++;
+	}
+out:
+	rcu_read_unlock();
+	return mixed;
+}
+
+static inline void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
+{
+	if (mixed)
+		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
+	else
+		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
+}
+
+static void update_mem_lpage_info(struct kvm *kvm,
+				  struct kvm_memory_slot *slot,
+				  unsigned int attr,
+				  gfn_t start, gfn_t end)
+{
+	unsigned long lpage_start, lpage_end;
+	unsigned long gfn, pages, mask;
+	int level;
+
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		pages = KVM_PAGES_PER_HPAGE(level);
+		mask = ~(pages - 1);
+		lpage_start = start & mask;
+		lpage_end = (end - 1) & mask;
+
+		/*
+		 * We only need to scan the head and tail page, for middle pages
+		 * we know they are not mixed.
+		 */
+		update_mixed(lpage_info_slot(lpage_start, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_start,
+							  lpage_start + pages));
+
+		if (lpage_start == lpage_end)
+			return;
+
+		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
+			update_mixed(lpage_info_slot(gfn, slot, level), false);
+
+		update_mixed(lpage_info_slot(lpage_end, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_end,
+							  lpage_end + pages));
+	}
+}
+
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	struct kvm_memslot_iter iter;
+	int i;
+
+	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
+			"Unsupported mem attribute.\n");
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
+		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+			slot = iter.slot;
+			start = max(start, slot->base_gfn);
+			end = min(end, slot->base_gfn + slot->npages);
+			if (WARN_ON_ONCE(start >= end))
+				continue;
+
+			update_mem_lpage_info(kvm, slot, attr, start, end);
+		}
+	}
+}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 081f62ccc9a1..ef11cda6f13f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12321,6 +12321,8 @@  static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
 			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		if (kvm_slot_can_be_private(slot))
+			ugfn |= slot->private_offset >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, disable large page support for this slot.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d65690cae80b..fd36ce6597ad 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2277,4 +2277,21 @@  static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 /* Max number of entries allowed for each kvm dirty ring */
 #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+
+#define KVM_MEM_ATTR_SHARED	0x0001
+#define KVM_MEM_ATTR_PRIVATE	0x0002
+
+#ifdef __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end);
+#else
+static inline void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+					    gfn_t start, gfn_t end)
+{
+}
+#endif
+
+#endif /* CONFIG_HAVE_KVM_PRIVATE_MEM */
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de5cce8c82c7..97d893f7482c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -938,13 +938,13 @@  static int kvm_init_mmu_notifier(struct kvm *kvm)
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
 #ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
-#define KVM_MEM_ATTR_SHARED	0x0001
 static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 				     bool is_private)
 {
 	gfn_t start, end;
 	unsigned long index;
 	void *entry;
+	int attr;
 	int r;
 
 	if (size == 0 || gpa + size < gpa)
@@ -959,7 +959,13 @@  static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	 * Guest memory defaults to private, kvm->mem_attr_array only stores
 	 * shared memory.
 	 */
-	entry = is_private ? NULL : xa_mk_value(KVM_MEM_ATTR_SHARED);
+	if (is_private) {
+		attr = KVM_MEM_ATTR_PRIVATE;
+		entry = NULL;
+	} else {
+		attr = KVM_MEM_ATTR_SHARED;
+		entry = xa_mk_value(KVM_MEM_ATTR_SHARED);
+	}
 
 	for (index = start; index < end; index++) {
 		r = xa_err(xa_store(&kvm->mem_attr_array, index, entry,
@@ -969,6 +975,7 @@  static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	}
 
 	kvm_zap_gfn_range(kvm, start, end);
+	kvm_arch_update_mem_attr(kvm, attr, start, end);
 
 	return r;
 err:
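
As a note on the xarray convention used above: guest memory defaults to
private, and kvm->mem_attr_array only stores entries for shared gfns, so a
point lookup reduces to a NULL check (illustrative helper, not part of the
patch):

static bool kvm_gfn_is_shared(struct kvm *kvm, gfn_t gfn)
{
	/* No entry => private (the default); a stored value => shared. */
	return xa_load(&kvm->mem_attr_array, gfn) != NULL;
}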