@@ -20,6 +20,11 @@ extern unsigned int mktme_algs;
extern void mprotect_set_encrypt(struct vm_area_struct *vma, int newkeyid,
unsigned long start, unsigned long end);
+/* MKTME encrypt_count for VMAs */
+extern struct percpu_ref *encrypt_count;
+extern void vma_get_encrypt_ref(struct vm_area_struct *vma);
+extern void vma_put_encrypt_ref(struct vm_area_struct *vma);
+
DECLARE_STATIC_KEY_FALSE(mktme_enabled_key);
static inline bool mktme_enabled(void)
{
@@ -84,11 +84,12 @@ void mprotect_set_encrypt(struct vm_area_struct *vma, int newkeyid,
if (oldkeyid == newkeyid)
return;
-
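+ /* Move the encrypt_count reference from the old KeyID to the new one */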
+ vma_put_encrypt_ref(vma);
newprot = pgprot_val(vma->vm_page_prot);
newprot &= ~mktme_keyid_mask();
newprot |= (unsigned long)newkeyid << mktme_keyid_shift();
vma->vm_page_prot = __pgprot(newprot);
+ vma_get_encrypt_ref(vma);
/*
* The VMA doesn't have any inherited pages.
@@ -97,6 +98,18 @@ void mprotect_set_encrypt(struct vm_area_struct *vma, int newkeyid,
unlink_anon_vmas(vma);
}
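+/*
+ * Account encrypted VMAs against their KeyID's encrypt_count so the KeyID
+ * cannot be freed while a VMA still maps memory encrypted with it.
+ */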
+void vma_get_encrypt_ref(struct vm_area_struct *vma)
+{
+ if (vma_keyid(vma))
+ percpu_ref_get(&encrypt_count[vma_keyid(vma)]);
+}
+
+void vma_put_encrypt_ref(struct vm_area_struct *vma)
+{
+ if (vma_keyid(vma))
+ percpu_ref_put(&encrypt_count[vma_keyid(vma)]);
+}
+
/* Prepare page to be used for encryption. Called from page allocator. */
void __prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
{
@@ -137,6 +150,22 @@ void __prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
page++;
}
+
+ /*
+ * Make sure the KeyID cannot be freed until the last page that
+ * uses the KeyID is gone.
+ *
+ * This is required because a page may live longer than the VMA
+ * it is mapped into (e.g. in the get_user_pages() case), so
+ * refcounting per-VMA is not enough.
+ *
+ * Taking a reference per 4K page also covers the case where the
+ * page gets split after allocation: free_encrypted_page() will
+ * balance out the refcount even if the page was split and freed
+ * as a bunch of 4K pages.
+ */
+
+ percpu_ref_get_many(&encrypt_count[keyid], 1 << order);
}
/*
@@ -145,7 +174,9 @@ void __prep_encrypted_page(struct page *page, int order, int keyid, bool zero)
*/
void free_encrypted_page(struct page *page, int order)
{
- int i;
+ int i, keyid;
+
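+ /* Read the KeyID before the loop below clears it in each page's page_ext */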
+ keyid = page_keyid(page);
/*
* The hardware/CPU does not enforce coherency between mappings
@@ -177,6 +208,8 @@ void free_encrypted_page(struct page *page, int order)
lookup_page_ext(page)->keyid = 0;
page++;
}
+
+ percpu_ref_put_many(&encrypt_count[keyid], 1 << order);
}
static int sync_direct_mapping_pte(unsigned long keyid,
@@ -2911,6 +2911,8 @@ static inline void mprotect_set_encrypt(struct vm_area_struct *vma,
int newkeyid,
unsigned long start,
unsigned long end) {}
+static inline void vma_get_encrypt_ref(struct vm_area_struct *vma) {}
+static inline void vma_put_encrypt_ref(struct vm_area_struct *vma) {}
#endif /* CONFIG_X86_INTEL_MKTME */
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
@@ -349,12 +349,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
if (new) {
*new = *orig;
INIT_LIST_HEAD(&new->anon_vma_chain);
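+ /* The duplicated VMA keeps the original's KeyID; take a reference for it */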
+ vma_get_encrypt_ref(new);
}
return new;
}
void vm_area_free(struct vm_area_struct *vma)
{
+ vma_put_encrypt_ref(vma);
kmem_cache_free(vm_area_cachep, vma);
}
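
For reference, a minimal sketch of how a KeyID's encrypt_count could be
initialized when the KeyID is allocated and drained when it is freed; the
allocation side is not part of this diff. example_alloc_keyid(),
example_free_keyid() and the completion-based wait are illustrative
assumptions only (key freeing is assumed to be serialized by the caller);
the percpu_ref and completion calls themselves are existing kernel API.

#include <linux/completion.h>
#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static DECLARE_COMPLETION(encrypt_count_drained);

/* Called by percpu_ref once the last page/VMA reference for the KeyID is dropped. */
static void encrypt_count_release(struct percpu_ref *ref)
{
	complete(&encrypt_count_drained);
}

/* On KeyID allocation: start counting the VMAs and pages that use it. */
static int example_alloc_keyid(int keyid)
{
	return percpu_ref_init(&encrypt_count[keyid], encrypt_count_release,
			       0, GFP_KERNEL);
}

/* On KeyID free: block until every VMA and page using the KeyID is gone. */
static void example_free_keyid(int keyid)
{
	reinit_completion(&encrypt_count_drained);
	percpu_ref_kill(&encrypt_count[keyid]);
	wait_for_completion(&encrypt_count_drained);
	percpu_ref_exit(&encrypt_count[keyid]);
}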