@@ -3189,6 +3189,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 {
 	struct vmap_area *va;
 	struct vm_struct *vm;
+	unsigned long vm_addr;
 
 	might_sleep();
 
@@ -3200,6 +3201,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (!va || !va->vm)
 		return NULL;
 	vm = va->vm;
+	vm_addr = (unsigned long) READ_ONCE(vm->addr);
 
 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
@@ -3331,6 +3333,7 @@ void vfree(const void *addr)
 				addr);
 		return;
 	}
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE, vm->addr, get_vm_area_size(vm));
 
 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
 		vm_reset_perms(vm);
@@ -3370,12 +3373,14 @@ void vunmap(const void *addr)
 
 	if (!addr)
 		return;
+
 	vm = remove_vm_area(addr);
 	if (unlikely(!vm)) {
 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
 				addr);
 		return;
 	}
+	asi_unmap(ASI_GLOBAL_NONSENSITIVE, vm->addr, get_vm_area_size(vm));
 	kfree(vm);
 }
 EXPORT_SYMBOL(vunmap);
@@ -3424,16 +3429,21 @@ void *vmap(struct page **pages, unsigned int count,
 
 	addr = (unsigned long)area->addr;
 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
-				pages, PAGE_SHIFT) < 0) {
-		vunmap(area->addr);
-		return NULL;
-	}
+				pages, PAGE_SHIFT) < 0)
+		goto err;
+
+	if (asi_map(ASI_GLOBAL_NONSENSITIVE, area->addr,
+		    get_vm_area_size(area)))
+		goto err; /* The necessary asi_unmap() is in vunmap. */
 
 	if (flags & VM_MAP_PUT_PAGES) {
 		area->pages = pages;
 		area->nr_pages = count;
 	}
 	return area->addr;
+err:
+	vunmap(area->addr);
+	return NULL;
 }
 EXPORT_SYMBOL(vmap);
 
@@ -3701,6 +3711,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		goto fail;
 	}
 
+	if (asi_map(ASI_GLOBAL_NONSENSITIVE, area->addr,
+		    get_vm_area_size(area)))
+		goto fail; /* The necessary asi_unmap() is in vfree. */
+
 	return area->addr;
 
 fail:
@@ -3780,6 +3794,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		size = ALIGN(real_size, 1UL << shift);
 	}
 
+	/*
+	 * Assume nobody is interested in accessing these pages via the direct
+	 * map, so there's no point in having them in ASI's global-nonsensitive
+	 * physmap, which would just cost us a TLB flush later on.
+	 */
+	gfp_mask |= __GFP_SENSITIVE;
+
 again:
 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
 			VM_UNINITIALIZED | vm_flags, start, end, node,
We add new VM flags for sensitive and global-nonsensitive, parallel to
the corresponding GFP flags. __get_vm_area_node and friends will
default to creating global-nonsensitive VM areas, and vmap then calls
asi_map as necessary.

__vmalloc_node_range has additional logic to check and set defaults for
the sensitivity of the underlying page allocation. It does this via an
initial __set_asi_flags call - note that it then calls
__get_vm_area_node which also calls __set_asi_flags. This second call
is a NOP.

By default, we mark the underlying page allocation as sensitive, even
if the VM area is global-nonsensitive. This is just an optimization to
avoid unnecessary asi_map etc, since presumably most code has no reason
to access vmalloc'd data through the direct map.

There are some details of the GFP-flag/VM-flag interaction that are not
really obvious, for example: what should happen when callers of
__vmalloc explicitly set GFP sensitivity flags? (That function has no
VM flags argument.) For the moment let's just not block on that and
focus on adding the infrastructure, though.

At the moment, the high-level vmalloc APIs don't actually provide a way
to configure sensitivity; this commit just adds the infrastructure.
We'll have to decide how to expose this to allocation sites as we
implement more denylist logic. vmap does already allow configuring VM
flags.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
 mm/vmalloc.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)
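
To make the flag pairing concrete: the VM-side flags described above
would sit alongside the existing VM_* bits in include/linux/vmalloc.h.
The names and bit values below are an illustrative sketch, not part of
this diff (only __GFP_SENSITIVE appears here):

/* Sketch only: names and bit values are placeholders, not from this patch. */
#define VM_SENSITIVE		0x00001000	/* backing pages stay sensitive */
#define VM_GLOBAL_NONSENSITIVE	0x00002000	/* asi_map() into ASI_GLOBAL_NONSENSITIVE */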
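
Seen from a caller, the asi_map()/asi_unmap() pairing that the new
comments refer to works out as below. This is a minimal sketch of the
behaviour the patch implements, using only existing APIs; nothing here
is new code:

static int asi_vmalloc_example(void)
{
	/*
	 * __vmalloc_node_range() ORs in __GFP_SENSITIVE, so the backing
	 * pages' direct-map alias stays out of ASI's restricted address
	 * space; __vmalloc_area_node() then asi_map()s the vmalloc
	 * alias as ASI_GLOBAL_NONSENSITIVE.
	 */
	void *p = __vmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	/*
	 * vfree() calls asi_unmap() on the alias (the counterpart that
	 * the "necessary asi_unmap() is in vfree" comment promises)
	 * before the pages are returned to the page allocator.
	 */
	vfree(p);
	return 0;
}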
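
And since vmap() already accepts VM flags, a later patch could expose
the opt-out at call sites roughly like this, where VM_SENSITIVE is the
hypothetical flag from the sketch above:

static void *map_pages_sensitive(struct page **pages, unsigned int count)
{
	/*
	 * Hypothetical: suppress the global-nonsensitive default so
	 * that vmap() would skip its asi_map() call.
	 */
	return vmap(pages, count, VM_MAP | VM_SENSITIVE, PAGE_KERNEL);
}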