@@ -37,6 +37,7 @@ struct vmemmap_remap_walk {
unsigned long reuse_addr;
struct list_head *vmemmap_pages;
#define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0)
+#define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1)
unsigned long flags;
};
@@ -211,7 +212,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
return ret;
} while (pgd++, addr = next, addr != end);
- if (walk->remap_pte)
+ if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, end);
return 0;
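
For readers unfamiliar with the deferred-flush convention introduced here: when the new
VMEMMAP_REMAP_NO_TLB_FLUSH bit is set, the per-range flush above is skipped and the caller
becomes responsible for issuing one global flush later. A minimal userspace sketch of that
contract (every name below is invented for illustration, none of it is kernel API):

	#include <stdio.h>

	#define REMAP_NO_TLB_FLUSH	(1UL << 1)	/* stand-in for VMEMMAP_REMAP_NO_TLB_FLUSH */

	/* Stand-in for flush_tlb_kernel_range(). */
	static void flush_range(unsigned long start, unsigned long end)
	{
		printf("flush %#lx-%#lx\n", start, end);
	}

	/* Remap a range; flush only if the caller did not ask to defer it. */
	static void remap_range(unsigned long start, unsigned long end,
				unsigned long flags)
	{
		/* ... page table rewrite would go here ... */
		if (!(flags & REMAP_NO_TLB_FLUSH))
			flush_range(start, end);
	}

	int main(void)
	{
		remap_range(0x1000, 0x2000, 0);			/* flushes immediately */
		remap_range(0x3000, 0x4000, REMAP_NO_TLB_FLUSH);	/* flush deferred */
		flush_range(0x3000, 0x4000);			/* caller flushes once, later */
		return 0;
	}
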
@@ -355,19 +356,21 @@ static int vmemmap_remap_split(unsigned long start, unsigned long end,
* @reuse: reuse address.
* @vmemmap_pages: list to deposit vmemmap pages to be freed. It is the
*		caller's responsibility to free pages.
+ * @flags: vmemmap_remap_walk flags, e.g. VMEMMAP_REMAP_NO_TLB_FLUSH
*
* Return: %0 on success, negative error code otherwise.
*/
static int vmemmap_remap_free(unsigned long start, unsigned long end,
unsigned long reuse,
- struct list_head *vmemmap_pages)
+ struct list_head *vmemmap_pages,
+ unsigned long flags)
{
int ret;
struct vmemmap_remap_walk walk = {
.remap_pte = vmemmap_remap_pte,
.reuse_addr = reuse,
.vmemmap_pages = vmemmap_pages,
- .flags = 0,
+ .flags = flags,
};
int nid = page_to_nid((struct page *)start);
gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
@@ -628,7 +631,8 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
static int __hugetlb_vmemmap_optimize(const struct hstate *h,
struct page *head,
- struct list_head *vmemmap_pages)
+ struct list_head *vmemmap_pages,
+ unsigned long flags)
{
int ret = 0;
unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
@@ -639,6 +643,18 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
return ret;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
+ /*
+ * Very Subtle
+ * If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
+ * immediately after remapping. As a result, subsequent accesses
+ * and modifications to struct pages associated with the hugetlb
+ * page could be to the OLD struct pages. Set the vmemmap optimized
+ * flag here so that it is copied to the new head page. This keeps
+ * the old and new struct pages in sync.
+ * If there is an error during optimization, we will immediately FLUSH
+ * the TLB and clear the flag below.
+ */
+ SetHPageVmemmapOptimized(head);
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
vmemmap_reuse = vmemmap_start;
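
The ordering above (set the flag, then remap) matters because, with the TLB flush deferred,
some CPUs may keep reading the old vmemmap pages for a while; since the new head page is
populated from the old one, writing the flag before the remap guarantees both copies agree.
A rough userspace analogy (the struct and names are invented for the sketch, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for the head struct page's state. */
	struct head_state {
		bool vmemmap_optimized;
	};

	int main(void)
	{
		struct head_state old = { 0 };
		struct head_state new;

		old.vmemmap_optimized = true;	/* SetHPageVmemmapOptimized() before the remap */
		new = old;			/* remap: new head page populated from the old one */

		/*
		 * Until the deferred TLB flush, readers may land on either
		 * copy; both report the same optimized state.
		 */
		printf("old=%d new=%d\n", old.vmemmap_optimized, new.vmemmap_optimized);
		return 0;
	}
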
@@ -650,11 +666,12 @@ static int __hugetlb_vmemmap_optimize(const struct hstate *h,
* mapping the range to vmemmap_pages list so that they can be freed by
* the caller.
*/
- ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, vmemmap_pages);
- if (ret)
+ ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
+ vmemmap_pages, flags);
+ if (ret) {
static_branch_dec(&hugetlb_optimize_vmemmap_key);
- else
- SetHPageVmemmapOptimized(head);
+ ClearHPageVmemmapOptimized(head);
+ }
return ret;
}
@@ -673,7 +690,7 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
LIST_HEAD(vmemmap_pages);
- __hugetlb_vmemmap_optimize(h, head, &vmemmap_pages);
+ __hugetlb_vmemmap_optimize(h, head, &vmemmap_pages, 0);
free_vmemmap_page_list(&vmemmap_pages);
}
@@ -708,19 +725,24 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
list_for_each_entry(folio, folio_list, lru) {
int ret = __hugetlb_vmemmap_optimize(h, &folio->page,
- &vmemmap_pages);
+ &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
/*
* Pages may have been accumulated, thus free what we have
* and try again.
*/
if (ret == -ENOMEM) {
+ flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize(h, &folio->page, &vmemmap_pages);
+ __hugetlb_vmemmap_optimize(h, &folio->page,
+ &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
}
}
+ flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
}
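
Putting the pieces together, the batch path now optimizes every folio without flushing,
accumulates the released vmemmap pages on one list, and issues a single flush_tlb_all()
before freeing them; only the -ENOMEM fallback flushes and frees early. A compact userspace
sketch of that control flow (the helpers, the page_list type, and the numbers are all
hypothetical, chosen only to exercise both paths):

	#include <errno.h>
	#include <stdio.h>

	struct page_list { int nr; };			/* stand-in for the vmemmap_pages list */

	static void flush_all(void)  { puts("global TLB flush"); }
	static void free_pages(struct page_list *l) { l->nr = 0; }

	/* Pretend optimizer: fails with -ENOMEM once too many pages are pending. */
	static int optimize_one(struct page_list *pages)
	{
		if (pages->nr > 16)
			return -ENOMEM;
		pages->nr += 7;		/* vmemmap pages released by this remap */
		return 0;
	}

	int main(void)
	{
		struct page_list pages = { 0 };

		for (int i = 0; i < 8; i++) {
			if (optimize_one(&pages) == -ENOMEM) {
				/*
				 * Stale translations may still reference the
				 * accumulated pages: flush before handing them back.
				 */
				flush_all();
				free_pages(&pages);
				optimize_one(&pages);	/* retry, result ignored as above */
			}
		}
		flush_all();				/* one flush for the whole batch */
		free_pages(&pages);
		return 0;
	}
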