@@ -203,6 +203,7 @@ struct ima_namespace {
struct ima_policy_data *policy_data;
struct integrity_iint_tree *iint_tree;
struct list_head *measurements;
+ atomic_long_t ml_len; /* number of measurement list entries visible in this namespace */
} __randomize_layout;
extern struct ima_namespace init_ima_ns;
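
The new ml_len counter gives each IMA namespace its own view of how many measurement list entries it can see. A minimal sketch of an accessor (the helper name ima_ns_ml_len() is hypothetical, not part of the patch):

    /* Hypothetical accessor: number of measurement list entries visible
     * to @ima_ns. The init namespace sees every entry, so its counter
     * tracks the length of the whole list.
     */
    static inline long ima_ns_ml_len(struct ima_namespace *ima_ns)
    {
            return atomic_long_read(&ima_ns->ml_len);
    }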
@@ -63,7 +63,9 @@ static ssize_t ima_show_measurements_count(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
- return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
+ struct ima_namespace *ima_ns = get_current_ns();
+
+ return ima_show_htable_value(buf, count, ppos, &ima_ns->ml_len);
}
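
ima_show_htable_value() needs no change here: it only formats an atomic_long_t and copies it to userspace, so it works the same whether it is handed the global ima_htable.len or the per-namespace ml_len. Its existing shape is roughly the following (shown for context only, not part of this patch):

    static ssize_t ima_show_htable_value(char __user *buf, size_t count,
                                         loff_t *ppos, atomic_long_t *val)
    {
            char tmpbuf[32];
            ssize_t len;

            len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n",
                            atomic_long_read(val));
            return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
    }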
@@ -77,10 +79,36 @@ static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct ima_queue_entry *qe;
+ struct ima_namespace *ima_ns = get_current_ns();
+ unsigned int ns_id = get_ns_id(ima_ns);
+
+ if (ima_ns == &init_ima_ns) {
+ /* we need a lock since pos could point beyond last element */
+ rcu_read_lock();
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (!l--) {
+ rcu_read_unlock();
+ return qe;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+ }
- /* we need a lock since pos could point beyond last element */
rcu_read_lock();
- list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ qe = list_next_or_null_rcu(&ima_measurements,
+ ima_ns->measurements,
+ struct ima_queue_entry,
+ later);
+ if (!qe) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ list_for_each_entry_from_rcu(qe, &ima_measurements, later) {
+ if (ns_id != qe->entry->ns_id)
+ continue;
+
if (!l--) {
rcu_read_unlock();
return qe;
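
For a non-init namespace the walk resumes at the element following ima_ns->measurements and skips every entry stamped with a foreign ns_id. A self-contained toy illustration of that rculist pattern (struct and function names below are illustrative, not IMA code):

    #include <linux/rculist.h>

    struct item {
            struct list_head node;
            unsigned int ns_id;
    };

    /* Caller must hold rcu_read_lock(). Resume the walk after @start and
     * return the first remaining entry that belongs to @ns_id, or NULL
     * once the end of the list is reached.
     */
    static struct item *first_item_for_ns(struct list_head *head,
                                          struct list_head *start,
                                          unsigned int ns_id)
    {
            struct item *it;

            it = list_next_or_null_rcu(head, start, struct item, node);
            if (!it)
                    return NULL;

            list_for_each_entry_from_rcu(it, head, node) {
                    if (it->ns_id == ns_id)
                            return it;
            }
            return NULL;
    }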
@@ -93,12 +121,27 @@ static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ima_queue_entry *qe = v;
+ struct ima_namespace *ima_ns = get_current_ns();
+ unsigned int ns_id = get_ns_id(ima_ns);
+
+ if (ima_ns == &init_ima_ns) {
+ /* lock protects when reading beyond last element
+ * against concurrent list-extension
+ */
+ rcu_read_lock();
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry,
+ later);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&qe->later == &ima_measurements) ? NULL : qe;
+ }
- /* lock protects when reading beyond last element
- * against concurrent list-extension
- */
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
+ list_for_each_entry_continue_rcu(qe, &ima_measurements, later) {
+ if (ns_id == qe->entry->ns_id)
+ break;
+ }
rcu_read_unlock();
(*pos)++;
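
If no further entry for this namespace exists, list_for_each_entry_continue_rcu() terminates with qe addressing the container of the list head, i.e. &qe->later == &ima_measurements. The function can therefore presumably keep the same tail as the init-namespace branch above; the hunk does not show the actual ending, but it would mirror that branch:

    return (&qe->later == &ima_measurements) ? NULL : qe;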
@@ -36,6 +36,7 @@ struct ima_namespace init_ima_ns = {
.policy_data = &init_policy_data,
.iint_tree = &init_iint_tree,
.measurements = &ima_measurements,
+ .ml_len = ATOMIC_LONG_INIT(0),
};
EXPORT_SYMBOL(init_ima_ns);
@@ -126,6 +126,7 @@ static struct ima_namespace *clone_ima_ns(struct user_namespace *user_ns,
ns->user_ns = get_user_ns(user_ns);
ns->ucounts = ucounts;
ns->frozen = false;
+ atomic_long_set(&ns->ml_len, 0);
rwlock_init(&ns->iint_tree->lock);
ns->iint_tree->root = RB_ROOT;
@@ -227,6 +227,14 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
goto out;
}
+ /* The initial IMA namespace has access to all measurement list entries,
+ * so always increment its measurement list length. Other namespaces can
+ * only see their own entries.
+ */
+ if (ima_ns != &init_ima_ns)
+ atomic_long_inc(&ima_ns->ml_len);
+ atomic_long_inc(&init_ima_ns.ml_len);
+
if (violation) /* invalidate pcr */
digests_arg = digests;
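
The accounting rule, expressed as a hedged standalone sketch (the helper name ima_ml_account() is hypothetical; the patch open-codes the two increments directly in ima_add_template_entry()):

    /* Every new measurement is visible to the init namespace, so its
     * counter always grows; a child namespace additionally counts the
     * entry for itself.
     */
    static void ima_ml_account(struct ima_namespace *ima_ns)
    {
            if (ima_ns != &init_ima_ns)
                    atomic_long_inc(&ima_ns->ml_len);
            atomic_long_inc(&init_ima_ns.ml_len);
    }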