
[RFC,07/16] KVM: mm: Introduce VM_KVM_PROTECTED

Message ID 20200522125214.31348-8-kirill.shutemov@linux.intel.com
State New, archived
Series KVM protected memory extension

Commit Message

Kirill A. Shutemov May 22, 2020, 12:52 p.m. UTC
Add a new VMA flag that marks a VMA as inaccessible to userspace, but
usable by the kernel with GUP if FOLL_KVM is specified.

FOLL_KVM is only used in the KVM code, which has to know how to deal
with such pages.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/mm.h  |  8 ++++++++
 mm/gup.c            | 20 ++++++++++++++++----
 mm/huge_memory.c    | 20 ++++++++++++++++----
 mm/memory.c         |  3 +++
 mm/mmap.c           |  3 +++
 virt/kvm/async_pf.c |  4 ++--
 virt/kvm/kvm_main.c |  9 +++++----
 7 files changed, 53 insertions(+), 14 deletions(-)
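
For illustration only (a sketch, not part of the patch): how kernel
code is expected to access a VM_KVM_PROTECTED range with GUP, modelled
on the copy_from_guest() hunk below; the helper name is hypothetical.

	/* Read up to one page from a protected guest mapping. */
	static int read_protected_page(unsigned long hva, void *buf, int len)
	{
		struct page *page;
		int npages;

		/*
		 * A plain GUP call fails on a VM_KVM_PROTECTED VMA, since
		 * its page protection is PAGE_NONE; FOLL_KVM signals that
		 * the caller knows how to deal with such pages.
		 */
		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
		if (npages != 1)
			return -EFAULT;

		memcpy(buf, page_address(page), len);	/* len <= PAGE_SIZE */
		put_page(page);
		return 0;
	}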

Comments

Mike Rapoport May 26, 2020, 6:15 a.m. UTC | #1
On Fri, May 22, 2020 at 03:52:05PM +0300, Kirill A. Shutemov wrote:
> Add a new VMA flag that marks a VMA as inaccessible to userspace, but
> usable by the kernel with GUP if FOLL_KVM is specified.
> 
> FOLL_KVM is only used in the KVM code, which has to know how to deal
> with such pages.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  include/linux/mm.h  |  8 ++++++++
>  mm/gup.c            | 20 ++++++++++++++++----
>  mm/huge_memory.c    | 20 ++++++++++++++++----
>  mm/memory.c         |  3 +++
>  mm/mmap.c           |  3 +++
>  virt/kvm/async_pf.c |  4 ++--
>  virt/kvm/kvm_main.c |  9 +++++----
>  7 files changed, 53 insertions(+), 14 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index e1882eec1752..4f7195365cc0 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -329,6 +329,8 @@ extern unsigned int kobjsize(const void *objp);
>  # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
>  #endif
>  
> +#define VM_KVM_PROTECTED 0

With all the ideas about removing pages from the direct map floating
around, I wouldn't limit this to KVM.

VM_NOT_IN_DIRECT_MAP would describe such areas better, but I realise
it's very far from perfect and nothing better comes to mind :)


>  #ifndef VM_GROWSUP
>  # define VM_GROWSUP	VM_NONE
>  #endif
> @@ -646,6 +648,11 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
>  	return vma->vm_flags & VM_ACCESS_FLAGS;
>  }
>  
> +static inline bool vma_is_kvm_protected(struct vm_area_struct *vma)

Ditto

> +{
> +	return vma->vm_flags & VM_KVM_PROTECTED;
> +}
> +
>  #ifdef CONFIG_SHMEM
>  /*
>   * The vma_is_shmem is not inline because it is used only by slow
> @@ -2773,6 +2780,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
>  #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
>  #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
>  #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
> +#define FOLL_KVM	0x80000 /* access to VM_KVM_PROTECTED VMAs */

Maybe

FOLL_DM		0x80000  /* access memory dropped from the direct map */

>  /*
>   * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
> diff --git a/mm/gup.c b/mm/gup.c
> index 87a6a59fe667..bd7b9484b35a 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c

...

> diff --git a/mm/mmap.c b/mm/mmap.c
> index f609e9ec4a25..d56c3f6efc99 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -112,6 +112,9 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
>  				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
>  			pgprot_val(arch_vm_get_page_prot(vm_flags)));
>  
> +	if (vm_flags & VM_KVM_PROTECTED)
> +		ret = PAGE_NONE;

Nit: vma_is_kvm_protected()?

> +
>  	return arch_filter_pgprot(ret);
>  }
>  EXPORT_SYMBOL(vm_get_page_prot);
> diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
> index 15e5b037f92d..7663e962510a 100644
> --- a/virt/kvm/async_pf.c
> +++ b/virt/kvm/async_pf.c
> @@ -60,8 +60,8 @@ static void async_pf_execute(struct work_struct *work)
>  	 * access remotely.
>  	 */
>  	down_read(&mm->mmap_sem);
> -	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
> -			&locked);
> +	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE | FOLL_KVM, NULL,
> +			      NULL, &locked);
>  	if (locked)
>  		up_read(&mm->mmap_sem);
>  
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 033471f71dae..530af95efdf3 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1727,7 +1727,7 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
>  
>  static inline int check_user_page_hwpoison(unsigned long addr)
>  {
> -	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
> +	int rc, flags = FOLL_HWPOISON | FOLL_WRITE | FOLL_KVM;
>  
>  	rc = get_user_pages(addr, 1, flags, NULL, NULL);
>  	return rc == -EHWPOISON;
> @@ -1771,7 +1771,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
>  static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
>  			   bool *writable, kvm_pfn_t *pfn)
>  {
> -	unsigned int flags = FOLL_HWPOISON;
> +	unsigned int flags = FOLL_HWPOISON | FOLL_KVM;
>  	struct page *page;
>  	int npages = 0;
>  
> @@ -2255,7 +2255,7 @@ int copy_from_guest(void *data, unsigned long hva, int len)
>  	int npages, seg;
>  
>  	while ((seg = next_segment(len, offset)) != 0) {
> -		npages = get_user_pages_unlocked(hva, 1, &page, 0);
> +		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
>  		if (npages != 1)
>  			return -EFAULT;
>  		memcpy(data, page_address(page) + offset, seg);
> @@ -2275,7 +2275,8 @@ int copy_to_guest(unsigned long hva, const void *data, int len)
>  	int npages, seg;
>  
>  	while ((seg = next_segment(len, offset)) != 0) {
> -		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_WRITE);
> +		npages = get_user_pages_unlocked(hva, 1, &page,
> +						 FOLL_WRITE | FOLL_KVM);
>  		if (npages != 1)
>  			return -EFAULT;
>  		memcpy(page_address(page) + offset, data, seg);
> -- 
> 2.26.2
> 
>
John Hubbard May 26, 2020, 6:40 a.m. UTC | #2
On 2020-05-22 05:52, Kirill A. Shutemov wrote:
...
> @@ -2773,6 +2780,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
>   #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
>   #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
>   #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
> +#define FOLL_KVM	0x80000 /* access to VM_KVM_PROTECTED VMAs */
>   

I grabbed 0x80000 already, for FOLL_FAST_ONLY. :)

thanks,
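
(For context, a sketch of the collision: FOLL_FAST_ONLY comes from a
separate in-flight gup series, and both it and this series claim the
same bit, so one of the two has to move.)

	#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
	#define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */
	#define FOLL_KVM	0x80000	/* this series: collides with FOLL_FAST_ONLY */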
Kirill A. Shutemov May 26, 2020, 10:01 p.m. UTC | #3
On Tue, May 26, 2020 at 09:15:52AM +0300, Mike Rapoport wrote:
> On Fri, May 22, 2020 at 03:52:05PM +0300, Kirill A. Shutemov wrote:
> > Add a new VMA flag that marks a VMA as inaccessible to userspace, but
> > usable by the kernel with GUP if FOLL_KVM is specified.
> > 
> > FOLL_KVM is only used in the KVM code, which has to know how to deal
> > with such pages.
> > 
> > Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > ---
> >  include/linux/mm.h  |  8 ++++++++
> >  mm/gup.c            | 20 ++++++++++++++++----
> >  mm/huge_memory.c    | 20 ++++++++++++++++----
> >  mm/memory.c         |  3 +++
> >  mm/mmap.c           |  3 +++
> >  virt/kvm/async_pf.c |  4 ++--
> >  virt/kvm/kvm_main.c |  9 +++++----
> >  7 files changed, 53 insertions(+), 14 deletions(-)
> > 
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index e1882eec1752..4f7195365cc0 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -329,6 +329,8 @@ extern unsigned int kobjsize(const void *objp);
> >  # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
> >  #endif
> >  
> > +#define VM_KVM_PROTECTED 0
> 
> With all the ideas about removing pages from the direct map floating
> around, I wouldn't limit this to KVM.
> 
> VM_NOT_IN_DIRECT_MAP would describe such areas better, but I realise
> it's very far from perfect and nothing better comes to mind :)

I don't like VM_NOT_IN_DIRECT_MAP.

It's not only about the direct mapping, but about the userspace mapping
as well. For the same reason, the other naming proposals don't fit
either.
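
In other words, a sketch of both effects (the direct-map removal itself
comes from later patches in this series, not this one):

	vma->vm_flags |= VM_KVM_PROTECTED;

	/*
	 * 1. Userspace mapping: vm_get_page_prot() now yields PAGE_NONE,
	 *    so any userspace access to the range faults.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * 2. Direct mapping: later patches also unmap the pages from the
	 *    kernel direct map, leaving GUP with FOLL_KVM as the only way
	 *    for the kernel to reach them.
	 */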

> > diff --git a/mm/mmap.c b/mm/mmap.c
> > index f609e9ec4a25..d56c3f6efc99 100644
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -112,6 +112,9 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
> >  				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
> >  			pgprot_val(arch_vm_get_page_prot(vm_flags)));
> >  
> > +	if (vm_flags & VM_KVM_PROTECTED)
> > +		ret = PAGE_NONE;
> 
> Nit: vma_is_kvm_protected()?

Which VMA? :P
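
(The nit doesn't apply here: vm_get_page_prot() receives only the
flags, not a VMA, as its signature shows.)

	pgprot_t vm_get_page_prot(unsigned long vm_flags);	/* no VMA in scope */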
Kirill A. Shutemov May 26, 2020, 10:04 p.m. UTC | #4
On Mon, May 25, 2020 at 11:40:01PM -0700, John Hubbard wrote:
> On 2020-05-22 05:52, Kirill A. Shutemov wrote:
> ...
> > @@ -2773,6 +2780,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
> >   #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
> >   #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
> >   #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
> > +#define FOLL_KVM	0x80000 /* access to VM_KVM_PROTECTED VMAs */
> 
> I grabbed 0x80000 already, for FOLL_FAST_ONLY. :)

Let's see who gets upstream first :P (Spoiler: you)

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index e1882eec1752..4f7195365cc0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -329,6 +329,8 @@  extern unsigned int kobjsize(const void *objp);
 # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 #endif
 
+#define VM_KVM_PROTECTED 0
+
 #ifndef VM_GROWSUP
 # define VM_GROWSUP	VM_NONE
 #endif
@@ -646,6 +648,11 @@  static inline bool vma_is_accessible(struct vm_area_struct *vma)
 	return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 
+static inline bool vma_is_kvm_protected(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & VM_KVM_PROTECTED;
+}
+
 #ifdef CONFIG_SHMEM
 /*
  * The vma_is_shmem is not inline because it is used only by slow
@@ -2773,6 +2780,7 @@  struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
 #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
+#define FOLL_KVM	0x80000 /* access to VM_KVM_PROTECTED VMAs */
 
 /*
  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
diff --git a/mm/gup.c b/mm/gup.c
index 87a6a59fe667..bd7b9484b35a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -385,10 +385,19 @@  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write_pte(struct vm_area_struct *vma,
+					pte_t pte, unsigned int flags)
 {
-	return pte_write(pte) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+	if (pte_write(pte))
+		return true;
+
+	if ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte))
+		return true;
+
+	if (!vma_is_kvm_protected(vma) || !(vma->vm_flags & VM_WRITE))
+		return false;
+
+	return (vma->vm_flags & VM_SHARED) || page_mapcount(pte_page(pte)) == 1;
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -431,7 +440,7 @@  static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(vma, pte, flags)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}
@@ -751,6 +760,9 @@  static struct page *follow_page_mask(struct vm_area_struct *vma,
 
 	ctx->page_mask = 0;
 
+	if (vma_is_kvm_protected(vma) && (flags & FOLL_KVM))
+		flags &= ~FOLL_NUMA;
+
 	/* make this handle hugepd */
 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 	if (!IS_ERR(page)) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6ecd1045113b..c3562648a4ef 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1518,10 +1518,19 @@  vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
  * FOLL_FORCE can write to even unwritable pmd's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write_pmd(struct vm_area_struct *vma,
+					pmd_t pmd, unsigned int flags)
 {
-	return pmd_write(pmd) ||
-	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	if (pmd_write(pmd))
+		return true;
+
+	if ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd))
+		return true;
+
+	if (!vma_is_kvm_protected(vma) || !(vma->vm_flags & VM_WRITE))
+		return false;
+
+	return (vma->vm_flags & VM_SHARED) || page_mapcount(pmd_page(pmd)) == 1;
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1534,7 +1543,7 @@  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+	if (flags & FOLL_WRITE && !can_follow_write_pmd(vma, *pmd, flags))
 		goto out;
 
 	/* Avoid dumping huge zero page */
@@ -1609,6 +1618,9 @@  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	bool was_writable;
 	int flags = 0;
 
+	if (vma_is_kvm_protected(vma))
+		return VM_FAULT_SIGBUS;
+
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
 		goto out_unlock;
diff --git a/mm/memory.c b/mm/memory.c
index f703fe8c8346..d7228db6e4bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4013,6 +4013,9 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	bool was_writable = pte_savedwrite(vmf->orig_pte);
 	int flags = 0;
 
+	if (vma_is_kvm_protected(vma))
+		return VM_FAULT_SIGBUS;
+
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
diff --git a/mm/mmap.c b/mm/mmap.c
index f609e9ec4a25..d56c3f6efc99 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -112,6 +112,9 @@  pgprot_t vm_get_page_prot(unsigned long vm_flags)
 				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
 			pgprot_val(arch_vm_get_page_prot(vm_flags)));
 
+	if (vm_flags & VM_KVM_PROTECTED)
+		ret = PAGE_NONE;
+
 	return arch_filter_pgprot(ret);
 }
 EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 15e5b037f92d..7663e962510a 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -60,8 +60,8 @@  static void async_pf_execute(struct work_struct *work)
 	 * access remotely.
 	 */
 	down_read(&mm->mmap_sem);
-	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
-			&locked);
+	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE | FOLL_KVM, NULL,
+			      NULL, &locked);
 	if (locked)
 		up_read(&mm->mmap_sem);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 033471f71dae..530af95efdf3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1727,7 +1727,7 @@  unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
-	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
+	int rc, flags = FOLL_HWPOISON | FOLL_WRITE | FOLL_KVM;
 
 	rc = get_user_pages(addr, 1, flags, NULL, NULL);
 	return rc == -EHWPOISON;
@@ -1771,7 +1771,7 @@  static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 			   bool *writable, kvm_pfn_t *pfn)
 {
-	unsigned int flags = FOLL_HWPOISON;
+	unsigned int flags = FOLL_HWPOISON | FOLL_KVM;
 	struct page *page;
 	int npages = 0;
 
@@ -2255,7 +2255,7 @@  int copy_from_guest(void *data, unsigned long hva, int len)
 	int npages, seg;
 
 	while ((seg = next_segment(len, offset)) != 0) {
-		npages = get_user_pages_unlocked(hva, 1, &page, 0);
+		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
 		memcpy(data, page_address(page) + offset, seg);
@@ -2275,7 +2275,8 @@  int copy_to_guest(unsigned long hva, const void *data, int len)
 	int npages, seg;
 
 	while ((seg = next_segment(len, offset)) != 0) {
-		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_WRITE);
+		npages = get_user_pages_unlocked(hva, 1, &page,
+						 FOLL_WRITE | FOLL_KVM);
 		if (npages != 1)
 			return -EFAULT;
 		memcpy(page_address(page) + offset, data, seg);