@@ -12,10 +12,25 @@
#include <linux/types.h>
#include <asm/pgtable-types.h>
+/*
+ * trans_alloc_page
+ * - Allocator that should return exactly one zeroed page. If this
+ * allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ * return -ENOMEM.
+ *
+ * trans_alloc_arg
+ * - Passed to trans_alloc_page as an argument
+ */
+
+struct trans_pgd_info {
+ void * (*trans_alloc_page)(void *arg);
+ void *trans_alloc_arg;
+};
+
int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
unsigned long end);
-int trans_pgd_map_page(pgd_t *trans_pgd, void *page, unsigned long dst_addr,
- pgprot_t pgprot);
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+ void *page, unsigned long dst_addr, pgprot_t pgprot);
#endif /* _ASM_TRANS_TABLE_H */
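
For reference, a minimal sketch of how a client could wire up its own allocator through the new interface; the names example_alloc()/example_map(), and the use of get_zeroed_page() with GFP_KERNEL, are illustrative only and not part of this patch (hibernate's real callback appears in the next hunk):

	static void *example_alloc(void *arg)
	{
		/* arg carries the gfp flags packed into trans_alloc_arg. */
		return (void *)get_zeroed_page((gfp_t)(unsigned long)arg);
	}

	static int example_map(pgd_t *trans_pgd, void *page, unsigned long dst_addr)
	{
		struct trans_pgd_info info = {
			.trans_alloc_page	= example_alloc,
			.trans_alloc_arg	= (void *)GFP_KERNEL,
		};

		/* Returns -ENOMEM if example_alloc() ever returns NULL. */
		return trans_pgd_map_page(&info, trans_pgd, page, dst_addr,
					  PAGE_KERNEL_EXEC);
	}
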
@@ -176,6 +176,11 @@ int arch_hibernation_header_restore(void *addr)
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
+static void *hibernate_page_alloc(void *arg)
+{
+ return (void *)get_safe_page((gfp_t)(unsigned long)arg);
+}
+
/*
* Copies length bytes, starting at src_start into a new page,
* performs cache maintenance, then maps it at the specified address low
@@ -192,6 +197,11 @@ static int create_safe_exec_page(void *src_start, size_t length,
unsigned long dst_addr,
phys_addr_t *phys_dst_addr)
{
+ struct trans_pgd_info trans_info = {
+ .trans_alloc_page = hibernate_page_alloc,
+ .trans_alloc_arg = (void *)GFP_ATOMIC,
+ };
+
void *page = (void *)get_safe_page(GFP_ATOMIC);
pgd_t *trans_pgd;
int rc;
@@ -206,7 +216,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
if (!trans_pgd)
return -ENOMEM;
- rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+ rc = trans_pgd_map_page(&trans_info, trans_pgd, page, dst_addr,
PAGE_KERNEL_EXEC);
if (rc)
return rc;
@@ -25,6 +25,11 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
+static void *trans_alloc(struct trans_pgd_info *info)
+{
+ return info->trans_alloc_page(info->trans_alloc_arg);
+}
+
static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
pte_t pte = READ_ONCE(*src_ptep);
@@ -201,9 +206,18 @@ int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
return rc;
}
-int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
- unsigned long dst_addr,
- pgprot_t pgprot)
+/*
+ * Add a map entry to trans_pgd for a base-size page at the PTE level.
+ * info: contains allocator and its argument.
+ * trans_pgd: page table in which the new map is added.
+ * page: page to be mapped.
+ * dst_addr: new VA address for the page.
+ * pgprot: protection for the page.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
+ void *page, unsigned long dst_addr, pgprot_t pgprot)
{
pgd_t *pgdp;
p4d_t *p4dp;
@@ -213,7 +227,7 @@ int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
- p4dp = (void *)get_safe_page(GFP_ATOMIC);
+ p4dp = trans_alloc(info);
- if (!pgdp)
+ if (!p4dp)
return -ENOMEM;
pgd_populate(&init_mm, pgdp, p4dp);
@@ -221,7 +235,7 @@ int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
p4dp = p4d_offset(pgdp, dst_addr);
if (p4d_none(READ_ONCE(*p4dp))) {
- pudp = (void *)get_safe_page(GFP_ATOMIC);
+ pudp = trans_alloc(info);
if (!pudp)
return -ENOMEM;
p4d_populate(&init_mm, p4dp, pudp);
@@ -229,7 +243,7 @@ int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pudp = pud_offset(p4dp, dst_addr);
if (pud_none(READ_ONCE(*pudp))) {
- pmdp = (void *)get_safe_page(GFP_ATOMIC);
+ pmdp = trans_alloc(info);
if (!pmdp)
return -ENOMEM;
pud_populate(&init_mm, pudp, pmdp);
@@ -237,14 +251,14 @@ int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pmdp = pmd_offset(pudp, dst_addr);
if (pmd_none(READ_ONCE(*pmdp))) {
- ptep = (void *)get_safe_page(GFP_ATOMIC);
+ ptep = trans_alloc(info);
if (!ptep)
return -ENOMEM;
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, dst_addr);
- set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+ set_pte(ptep, pfn_pte(virt_to_pfn(page), pgprot));
return 0;
}
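
With the last hunk, trans_pgd_map_page() honors the protection passed in by the caller instead of hard-coding PAGE_KERNEL_EXEC, so a caller can, for instance, map a scratch data page without execute permission. A one-line sketch, assuming info, trans_pgd, scratch_page and dst_addr are set up as in the example above:

	rc = trans_pgd_map_page(&info, trans_pgd, scratch_page, dst_addr,
				PAGE_KERNEL);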