@@ -4,6 +4,8 @@
#include <linux/types.h>
#include <linux/page_ext.h>

+struct vm_area_struct;
+
#ifdef CONFIG_X86_INTEL_MKTME
extern phys_addr_t mktme_keyid_mask;
extern int mktme_nr_keyids;
@@ -14,6 +16,9 @@ extern struct page_ext_operations page_mktme_ops;
#define page_keyid page_keyid
int page_keyid(const struct page *page);

#define vma_keyid vma_keyid
+int vma_keyid(struct vm_area_struct *vma);
+
#else
#define mktme_keyid_mask ((phys_addr_t)0)
#define mktme_nr_keyids 0
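A side note on the header convention above, not part of the patch itself: defining vma_keyid as a self-referencing macro is the usual way to let generic code detect that the architecture supplies its own implementation. A minimal sketch of the matching generic fallback, assuming it lives in a generic header such as <linux/mm.h> (the exact location is not shown in this patch):

/*
 * Illustrative sketch only: generic fallback used when the
 * architecture did not define vma_keyid.
 */
#ifndef vma_keyid
static inline int vma_keyid(struct vm_area_struct *vma)
{
        return 0;       /* no memory encryption KeyID */
}
#endif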
@@ -1,3 +1,4 @@
+#include <linux/mm.h>
#include <asm/mktme.h>

phys_addr_t mktme_keyid_mask;
@@ -37,3 +38,14 @@ struct page_ext_operations page_mktme_ops = {
.need = need_page_mktme,
.init = init_page_mktme,
};
+
+int vma_keyid(struct vm_area_struct *vma)
+{
+ pgprotval_t prot;
+
+ if (!mktme_enabled())
+ return 0;
+
+ prot = pgprot_val(vma->vm_page_prot);
+ return (prot & mktme_keyid_mask) >> mktme_keyid_shift;
+}
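For illustration, a worked example of the extraction above using hypothetical values (the real shift and mask are discovered at boot and are not part of this patch): if the KeyID occupied physical address bits 46..51, a vm_page_prot value with bit 47 set would decode to KeyID 2.

/* Hypothetical values, for illustration only. */
unsigned long long shift = 46;
unsigned long long mask  = 0x3fULL << shift;        /* KeyID bits 46..51 */
unsigned long long prot  = 1ULL << 47;              /* KeyID bits in vm_page_prot */
unsigned long long keyid = (prot & mask) >> shift;  /* keyid == 2 */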
We store the KeyID in the upper bits of vm_page_prot, matching the position of the KeyID in the PTE. vma_keyid() extracts the KeyID from vm_page_prot. With the KeyID in vm_page_prot we don't need to modify any page table helper to propagate the KeyID to page table entries.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mktme.h |  5 +++++
 arch/x86/mm/mktme.c          | 12 ++++++++++++
 2 files changed, 17 insertions(+)
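A sketch of why no page table helper needs changing, assuming the usual path where PTEs are built from vma->vm_page_prot; the function below is hypothetical and only illustrates the point, it is not added by this patch:

/*
 * Because vm_page_prot already carries the KeyID in the same bit
 * position as the PTE, a PTE built the normal way inherits the
 * KeyID without any MKTME-specific code.
 */
static pte_t make_pte_for_vma(struct page *page, struct vm_area_struct *vma)
{
        return mk_pte(page, vma->vm_page_prot);
}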