@@ -132,11 +132,20 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
-static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
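+/*
+ * Raw unmap without the ASI bookkeeping; see the comment above
+ * ___pcpu_map_pages() below for when the raw variants must be used.
+ */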
+static void ___pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT));
}
+static void __pcpu_unmap_pages(unsigned long addr, int nr_pages,
+ unsigned long vm_flags)
+{
+ unsigned long size = nr_pages << PAGE_SHIFT;
+
+	/*
+	 * Only chunks vmapped as globally non-sensitive also have an ASI
+	 * mapping that needs to be torn down.
+	 */
+	if (vm_flags & VM_GLOBAL_NONSENSITIVE)
+		asi_unmap(ASI_GLOBAL_NONSENSITIVE, (void *)addr, size);
+ ___pcpu_unmap_pages(addr, nr_pages);
+}
+
/**
* pcpu_unmap_pages - unmap pages out of a pcpu_chunk
* @chunk: chunk of interest
@@ -153,6 +162,8 @@ static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
struct page **pages, int page_start, int page_end)
{
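+	/*
+	 * chunk->data holds the vm_struct array attached by
+	 * pcpu_create_chunk(); chunks without one (e.g. the first chunk)
+	 * fall back to plain VM_ALLOC and thus have no ASI mapping.
+	 */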
+ struct vm_struct **vms = (struct vm_struct **)chunk->data;
+ unsigned long vm_flags = vms ? vms[0]->flags : VM_ALLOC;
unsigned int cpu;
int i;
@@ -165,7 +176,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
pages[pcpu_page_idx(cpu, i)] = page;
}
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
- page_end - page_start);
+ page_end - page_start, vm_flags);
}
}
@@ -190,13 +201,38 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
-static int __pcpu_map_pages(unsigned long addr, struct page **pages,
- int nr_pages)
+/*
+ * __pcpu_map_pages() must not be called during early percpu initialization,
+ * because asi_map() depends on the page allocator, which isn't up yet at
+ * that point. Use ___pcpu_map_pages() instead during init. Note that any
+ * pages mapped with ___pcpu_map_pages() are treated as sensitive memory
+ * unless they are explicitly mapped with asi_map() later.
+ */
+static int ___pcpu_map_pages(unsigned long addr, struct page **pages,
+ int nr_pages)
{
return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
PAGE_KERNEL, pages, PAGE_SHIFT);
}
+static int __pcpu_map_pages(unsigned long addr, struct page **pages,
+ int nr_pages, unsigned long vm_flags)
+{
+ unsigned long size = nr_pages << PAGE_SHIFT;
+ int err;
+
+ err = ___pcpu_map_pages(addr, pages, nr_pages);
+ if (err)
+ return err;
+
+	if (vm_flags & VM_GLOBAL_NONSENSITIVE) {
+		/*
+		 * If this fails, pcpu_map_pages()->__pcpu_unmap_pages() will
+		 * call asi_unmap() and clean up any partial mappings.
+		 */
+		return asi_map(ASI_GLOBAL_NONSENSITIVE, (void *)addr, size);
+	}
+
+	return 0;
+}
+
/**
* pcpu_map_pages - map pages into a pcpu_chunk
* @chunk: chunk of interest
@@ -214,13 +250,15 @@ static int __pcpu_map_pages(unsigned long addr, struct page **pages,
static int pcpu_map_pages(struct pcpu_chunk *chunk,
struct page **pages, int page_start, int page_end)
{
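+	/* Same vm_flags derivation as in pcpu_unmap_pages() above. */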
+ struct vm_struct **vms = (struct vm_struct **)chunk->data;
+ unsigned long vm_flags = vms ? vms[0]->flags : VM_ALLOC;
unsigned int cpu, tcpu;
int i, err;
for_each_possible_cpu(cpu) {
err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
&pages[pcpu_page_idx(cpu, page_start)],
- page_end - page_start);
+ page_end - page_start, vm_flags);
if (err < 0)
goto err;
@@ -232,7 +270,7 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
err:
for_each_possible_cpu(tcpu) {
__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
- page_end - page_start);
+ page_end - page_start, vm_flags);
if (tcpu == cpu)
break;
}
@@ -3328,8 +3328,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
/* pte already populated, the following shouldn't fail */
- rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
- unit_pages);
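+		/*
+		 * asi_map() can't be used this early in boot (the page
+		 * allocator isn't up yet), so take the raw mapping path;
+		 * this memory remains sensitive unless asi_map()'ed later.
+		 */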
+ rc = ___pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+ unit_pages);
if (rc < 0)
panic("failed to map percpu area, err=%d\n", rc);