@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2016-19 Intel Corporation.
-#include <asm/mman.h>
+#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/hashtable.h>
@@ -11,6 +11,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/security.h>
#include "driver.h"
struct sgx_add_page_req {
@@ -575,6 +576,46 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
return ret;
}
+static int sgx_encl_prepare_page(struct file *filp, unsigned long dst,
+ unsigned long src, void *buf)
+{
+ struct vm_area_struct *vma;
+ unsigned long prot;
+ int rc;
+
+ if (dst & ~PAGE_SIZE)
+ return -EINVAL;
+
+ rc = down_read_killable(&current->mm->mmap_sem);
+ if (rc)
+ return rc;
+
+ vma = find_vma(current->mm, dst);
+ if (vma && dst >= vma->vm_start)
+ prot = _calc_vm_trans(vma->vm_flags, VM_READ, PROT_READ) |
+ _calc_vm_trans(vma->vm_flags, VM_WRITE, PROT_WRITE) |
+ _calc_vm_trans(vma->vm_flags, VM_EXEC, PROT_EXEC);
+ else
+ prot = 0;
+
+ vma = find_vma(current->mm, src);
+ if (!vma || src < vma->vm_start || src + PAGE_SIZE > vma->vm_end)
+ rc = -EFAULT;
+
+ if (!rc && !(vma->vm_flags & VM_MAYEXEC))
+ rc = -EACCES;
+
+ if (!rc && copy_from_user(buf, (void __user *)src, PAGE_SIZE))
+ rc = -EFAULT;
+
+ if (!rc)
+ rc = security_enclave_load(filp, dst, PAGE_SIZE, prot, vma);
+
+ up_read(&current->mm->mmap_sem);
+
+ return rc;
+}
+
/**
* sgx_ioc_enclave_add_page - handler for %SGX_IOC_ENCLAVE_ADD_PAGE
*
@@ -613,10 +654,9 @@ static long sgx_ioc_enclave_add_page(struct file *filep, unsigned int cmd,
data = kmap(data_page);
- if (copy_from_user((void *)data, (void __user *)addp->src, PAGE_SIZE)) {
- ret = -EFAULT;
+ ret = sgx_encl_prepare_page(filep, addp->addr, addp->src, data);
+ if (ret)
goto out;
- }
ret = sgx_encl_add_page(encl, addp->addr, data, &secinfo, addp->mrmask);
if (ret)
@@ -718,6 +758,31 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
return ret;
}
+static int sgx_encl_prepare_sigstruct(struct file *filp, unsigned long src,
+ struct sgx_sigstruct *ss)
+{
+ struct vm_area_struct *vma;
+ int rc;
+
+ rc = down_read_killable(&current->mm->mmap_sem);
+ if (rc)
+ return rc;
+
+ vma = find_vma(current->mm, src);
+ if (!vma || src < vma->vm_start || src + sizeof(*ss) > vma->vm_end)
+ rc = -EFAULT;
+
+ if (!rc && copy_from_user(ss, (void __user *)src, sizeof(*ss)))
+ rc = -EFAULT;
+
+ if (!rc)
+ rc = security_enclave_init(filp, ss, vma);
+
+ up_read(&current->mm->mmap_sem);
+
+ return rc;
+}
+
/**
* sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
*
@@ -753,12 +818,9 @@ static long sgx_ioc_enclave_init(struct file *filep, unsigned int cmd,
((unsigned long)sigstruct + PAGE_SIZE / 2);
memset(einittoken, 0, sizeof(*einittoken));
- if (copy_from_user(sigstruct, (void __user *)initp->sigstruct,
- sizeof(*sigstruct))) {
- ret = -EFAULT;
+ ret = sgx_encl_prepare_sigstruct(filep, initp->sigstruct, sigstruct);
+ if (ret)
goto out;
- }
-
ret = sgx_encl_init(encl, sigstruct, einittoken);
@@ -63,14 +63,26 @@ static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
{
struct sgx_encl *encl = file->private_data;
+ unsigned long prot;
+ int rc;
vma->vm_ops = &sgx_vm_ops;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
vma->vm_private_data = encl;
- kref_get(&encl->refcount);
+ prot = vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ vma->vm_flags &= ~prot;
- return 0;
+ prot = _calc_vm_trans(prot, VM_READ, PROT_READ) |
+ _calc_vm_trans(prot, VM_WRITE, PROT_WRITE) |
+ _calc_vm_trans(prot, VM_EXEC, PROT_EXEC);
+ rc = security_file_mprotect(vma, prot, prot);
+ if (!rc) {
+ vma->vm_flags |= calc_vm_prot_bits(prot, 0);
+ kref_get(&encl->refcount);
+ }
+
+ return rc;
}
static unsigned long sgx_get_unmapped_area(struct file *file,
It’s straightforward to call new LSM hooks from the SGX subsystem/module. There are three places where LSM hooks are invoked. 1) sgx_mmap() invokes security_file_mprotect() to validate requested protection. It is necessary because security_mmap_file() invoked by mmap() syscall only validates protections against /dev/sgx/enclave file, but not against the files from which the pages were loaded. 2) security_enclave_load() is invoked upon loading of every enclave page by the EADD ioctl. Please note that if pages are EADD’ed in batch, the SGX subsystem/module is responsible for dividing pages into chunks so that each chunk is loaded from a single VMA. 3) security_enclave_init() is invoked before initializing (EINIT) every enclave. Signed-off-by: Cedric Xing <cedric.xing@intel.com> --- arch/x86/kernel/cpu/sgx/driver/ioctl.c | 80 +++++++++++++++++++++++--- arch/x86/kernel/cpu/sgx/driver/main.c | 16 +++++- 2 files changed, 85 insertions(+), 11 deletions(-)