--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -85,4 +85,6 @@ int evm_init_hmac(struct inode *inode, const struct xattr *xattrs,
char *hmac_val);
int evm_init_secfs(void);
+bool evm_enabled(void);
+
#endif
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -1117,6 +1117,9 @@ static int __init init_evm(void)
int error;
struct list_head *pos, *q;
+ if (!evm_enabled())
+ return 0;
+
evm_init_config();
error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
@@ -1181,4 +1184,9 @@ DEFINE_LSM(evm) = {
.blobs = &evm_blob_sizes,
};
+bool evm_enabled(void)
+{
+ return *__lsm_evm.enabled != 0;
+}
+
late_initcall(init_evm);
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -69,7 +69,8 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
unsigned int i;
int ret;
- if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE) ||
+ !evm_enabled())
return -EPERM;
ret = kstrtouint_from_user(buf, count, 0, &i);
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -1141,10 +1141,15 @@ static int ima_kernel_module_request(char *kmod_name)
#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+static bool ima_enabled(void);
+
static int __init init_ima(void)
{
int error;
+ if (!ima_enabled())
+ return 0;
+
ima_appraise_parse_cmdline();
ima_init_template_list();
hash_setup(CONFIG_IMA_DEFAULT_HASH);
@@ -1217,4 +1222,9 @@ DEFINE_LSM(ima) = {
.blobs = &ima_blob_sizes,
};
+static bool ima_enabled(void)
+{
+ return *__lsm_ima.enabled != 0;
+}
+
late_initcall(init_ima); /* Start IMA after the TPM is available */
--- a/security/security.c
+++ b/security/security.c
@@ -363,8 +363,17 @@ static void __init ordered_lsm_parse(const char *order, const char *origin)
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (strcmp(lsm->name, name) == 0) {
- if (lsm->order == LSM_ORDER_MUTABLE)
+ if (lsm->order == LSM_ORDER_MUTABLE) {
append_ordered_lsm(lsm, origin);
+ } else if (lsm->order == LSM_ORDER_LAST) {
+ /*
+ * We cannot append the "LAST" LSM yet.
+ * Set a flag to append it later.
+ * Use lsm->enabled as the flag.
+ */
+ set_enabled(lsm, true);
+ }
+
found = true;
}
}
@@ -386,7 +395,8 @@ static void __init ordered_lsm_parse(const char *order, const char *origin)
/* LSM_ORDER_LAST is always last. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
- if (lsm->order == LSM_ORDER_LAST)
+ /* If the "LAST" LSM is enabled above, append it now. */
+ if (lsm->order == LSM_ORDER_LAST && is_enabled(lsm))
append_ordered_lsm(lsm, " last");
}
Let the lsm= cmdline option enable and disable ima and evm.

Before the change:

(ima and evm are not enabled in cmdline)
[root@(none) /]# grep -o "lsm.*" /proc/cmdline
lsm=

(but they are actually enabled)
[root@(none) /]# ls /sys/kernel/security/
evm ima integrity lsm
[root@(none) /]# cat /sys/kernel/security/lsm
capability,ima,evm

After the change:

(ima and evm are not enabled in cmdline)
[root@(none) /]# grep -o "lsm=.*" /proc/cmdline
lsm=

(ima and evm are not enabled, as expected)
[root@(none) /]# ls /sys/kernel/security/
integrity lsm
[root@(none) /]# cat /sys/kernel/security/lsm
capability

(ima and evm are enabled in cmdline)
[root@(none) /]# grep -o "lsm.*" /proc/cmdline
lsm=ima,evm

(ima and evm are enabled, as expected)
[root@(none) /]# ls /sys/kernel/security/
evm ima integrity lsm
[root@(none) /]# cat /sys/kernel/security/lsm
capability,ima,evm

Signed-off-by: Song Liu <song@kernel.org>
---
This was discussed in this RFC [1].

[1] https://lore.kernel.org/linux-integrity/20241217202525.1802109-1-song@kernel.org/
---
 security/integrity/evm/evm.h       |  2 ++
 security/integrity/evm/evm_main.c  |  8 ++++++++
 security/integrity/evm/evm_secfs.c |  3 ++-
 security/integrity/ima/ima_main.c  | 10 ++++++++++
 security/security.c                | 14 ++++++++++++--
 5 files changed, 34 insertions(+), 3 deletions(-)
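
For completeness, here is a rough standalone sketch of the enable-flag pattern the patch relies on. This is plain userspace C, not kernel code; lsm_stub, my_lsm_on, my_lsm_enabled() and my_lsm_init() are made-up names standing in for struct lsm_info, the lsm->enabled flag set by ordered_lsm_parse(), and the evm_enabled()/init_evm() pair:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant part of struct lsm_info. */
struct lsm_stub {
	const char *name;
	int *enabled;
};

/* Set to 1 when the "cmdline" selects this LSM, as ordered_lsm_parse() does. */
static int my_lsm_on;

static struct lsm_stub my_lsm = {
	.name		= "my_lsm",
	.enabled	= &my_lsm_on,
};

/* Same shape as evm_enabled()/ima_enabled(): only the flag is consulted. */
static bool my_lsm_enabled(void)
{
	return *my_lsm.enabled != 0;
}

/* Same shape as init_evm()/init_ima(): bail out before any setup work. */
static int my_lsm_init(void)
{
	if (!my_lsm_enabled())
		return 0;

	printf("%s: initializing\n", my_lsm.name);
	return 0;
}

int main(void)
{
	my_lsm_init();		/* not selected: prints nothing */

	my_lsm_on = 1;		/* as if "lsm=...,my_lsm" had been passed */
	my_lsm_init();		/* selected: runs the real initialization */
	return 0;
}

The point is that the init routine itself is the single place that decides whether to register anything, which is why a disabled ima/evm no longer shows up under /sys/kernel/security/ in the "After the change" output above.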