Message ID | 1470071280-78706-2-git-send-email-thgarnie@google.com (mailing list archive) |
---|---|
State | New, archived |
On Monday, August 01, 2016 10:07:59 AM Thomas Garnier wrote:
> Correctly setup the temporary mapping for hibernation. Previous
> implementation assumed the address was aligned on the PGD level. With
> KASLR memory randomization enabled, the address is randomized on the PUD
> level. This change supports unaligned address up to PMD.
>
> Signed-off-by: Thomas Garnier <thgarnie@google.com>

Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

This code is shared with kexec AFAICS, so it likely is better to push it
through tip rather than through the PM tree.

> ---
>  arch/x86/mm/ident_map.c | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
> index ec21796..ea1ebf1 100644
> --- a/arch/x86/mm/ident_map.c
> +++ b/arch/x86/mm/ident_map.c
> @@ -3,15 +3,16 @@
>   * included by both the compressed kernel and the regular kernel.
>   */
>
> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
> 			   unsigned long addr, unsigned long end)
>  {
> -	addr &= PMD_MASK;
> -	for (; addr < end; addr += PMD_SIZE) {
> -		pmd_t *pmd = pmd_page + pmd_index(addr);
> +	int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
> +
> +	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
> +		pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>
>  		if (!pmd_present(*pmd))
> -			set_pmd(pmd, __pmd(addr | pmd_flag));
> +			set_pmd(pmd, __pmd(addr | info->pmd_flag));
>  	}
>  }
>
> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
> 			  unsigned long addr, unsigned long end)
>  {
>  	unsigned long next;
> +	int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>
>  	for (; addr < end; addr = next) {
> -		pud_t *pud = pud_page + pud_index(addr);
> +		pud_t *pud = pud_page + pud_index(addr) + off;
>  		pmd_t *pmd;
>
>  		next = (addr & PUD_MASK) + PUD_SIZE;
> @@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>
>  		if (pud_present(*pud)) {
>  			pmd = pmd_offset(pud, 0);
> -			ident_pmd_init(info->pmd_flag, pmd, addr, next);
> +			ident_pmd_init(info, pmd, addr, next);
>  			continue;
>  		}
>  		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
>  		if (!pmd)
>  			return -ENOMEM;
> -		ident_pmd_init(info->pmd_flag, pmd, addr, next);
> +		ident_pmd_init(info, pmd, addr, next);
>  		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
>  	}
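The effect of the new `off` in the quoted hunks can be seen in isolation: with `info->kernel_mapping` set, each entry is shifted by `pmd_index(__PAGE_OFFSET)` / `pud_index(__PAGE_OFFSET)`, so a physical range is installed at the page-table slots of its direct-mapping virtual address rather than at its identity-mapping slots. A minimal standalone sketch (user-space C; the base and physical address are hypothetical values chosen for illustration):

```c
#include <stdio.h>

#define PUD_SHIFT     30
#define PTRS_PER_PUD  512
#define pud_index(v)  (((v) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

int main(void)
{
	/* Hypothetical PUD-aligned direct-map base, standing in for
	 * __PAGE_OFFSET after KASLR memory randomization. */
	unsigned long page_offset = 0xffff884b00000000UL;
	unsigned long paddr = 0x100000000UL;	/* 4 GiB physical */

	/* Identity mapping uses slot 4; the kernel direct mapping lands
	 * at slot 4 + pud_index(page_offset) = 304. */
	printf("identity slot   : %lu\n", pud_index(paddr));
	printf("direct-map slot : %lu\n",
	       pud_index(paddr) + pud_index(page_offset));
	return 0;
}
```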
On Mon, Aug 1, 2016 at 10:07 AM, Thomas Garnier <thgarnie@google.com> wrote:
> Correctly setup the temporary mapping for hibernation. Previous
> implementation assumed the address was aligned on the PGD level. With
> KASLR memory randomization enabled, the address is randomized on the PUD
> level. This change supports unaligned address up to PMD.
>
> Signed-off-by: Thomas Garnier <thgarnie@google.com>
> ---
>  arch/x86/mm/ident_map.c | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
> index ec21796..ea1ebf1 100644
> --- a/arch/x86/mm/ident_map.c
> +++ b/arch/x86/mm/ident_map.c
> @@ -3,15 +3,16 @@
>   * included by both the compressed kernel and the regular kernel.
>   */
>
> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
> 			   unsigned long addr, unsigned long end)
>  {
> -	addr &= PMD_MASK;
> -	for (; addr < end; addr += PMD_SIZE) {
> -		pmd_t *pmd = pmd_page + pmd_index(addr);
> +	int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
> +
> +	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
> +		pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>
>  		if (!pmd_present(*pmd))
> -			set_pmd(pmd, __pmd(addr | pmd_flag));
> +			set_pmd(pmd, __pmd(addr | info->pmd_flag));
>  	}
>  }
>
> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
> 			  unsigned long addr, unsigned long end)
>  {
>  	unsigned long next;
> +	int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>
>  	for (; addr < end; addr = next) {
> -		pud_t *pud = pud_page + pud_index(addr);
> +		pud_t *pud = pud_page + pud_index(addr) + off;
>  		pmd_t *pmd;
>
>  		next = (addr & PUD_MASK) + PUD_SIZE;

Is there any chance of (pud_index(addr) + off) or (pmd_index(addr) + off)
being bigger than 512?

Looks like we need to change the loop from phys address to virtual
address instead, to avoid the overflow.

Thanks

Yinghai
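Yinghai's concern can be made concrete with the same index arithmetic: both `pud_index()` values can be up to 511, so their sum can exceed the 512 entries of a single table page, and `pud_page + pud_index(addr) + off` would then point past the 4 KiB PUD page. A standalone sketch with hypothetical values:

```c
#include <stdio.h>

#define PUD_SHIFT     30
#define PTRS_PER_PUD  512
#define pud_index(v)  (((v) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

int main(void)
{
	/* Hypothetical randomized direct-map base: pud_index() == 300. */
	unsigned long page_offset = 0xffff884b00000000UL;
	/* Physical address at 400 GiB: pud_index() == 400. */
	unsigned long paddr = 400UL << PUD_SHIFT;
	unsigned long slot = pud_index(paddr) + pud_index(page_offset);

	/* Prints 700: 188 entries past the end of a 512-entry PUD page,
	 * i.e. a write beyond the allocated table. */
	printf("combined slot = %lu (max valid index = %d)\n",
	       slot, PTRS_PER_PUD - 1);
	return 0;
}
```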
On Tue, Aug 2, 2016 at 10:36 AM, Yinghai Lu <yinghai@kernel.org> wrote:
> On Mon, Aug 1, 2016 at 10:07 AM, Thomas Garnier <thgarnie@google.com> wrote:
>> Correctly setup the temporary mapping for hibernation. Previous
>> implementation assumed the address was aligned on the PGD level. With
>> KASLR memory randomization enabled, the address is randomized on the PUD
>> level. This change supports unaligned address up to PMD.
>>
>> Signed-off-by: Thomas Garnier <thgarnie@google.com>
>> ---
>>  arch/x86/mm/ident_map.c | 18 ++++++++++--------
>>  1 file changed, 10 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
>> index ec21796..ea1ebf1 100644
>> --- a/arch/x86/mm/ident_map.c
>> +++ b/arch/x86/mm/ident_map.c
>> @@ -3,15 +3,16 @@
>>   * included by both the compressed kernel and the regular kernel.
>>   */
>>
>> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
>> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
>> 			   unsigned long addr, unsigned long end)
>>  {
>> -	addr &= PMD_MASK;
>> -	for (; addr < end; addr += PMD_SIZE) {
>> -		pmd_t *pmd = pmd_page + pmd_index(addr);
>> +	int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
>> +
>> +	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
>> +		pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>>
>>  		if (!pmd_present(*pmd))
>> -			set_pmd(pmd, __pmd(addr | pmd_flag));
>> +			set_pmd(pmd, __pmd(addr | info->pmd_flag));
>>  	}
>>  }
>>
>> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>> 			  unsigned long addr, unsigned long end)
>>  {
>>  	unsigned long next;
>> +	int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>>
>>  	for (; addr < end; addr = next) {
>> -		pud_t *pud = pud_page + pud_index(addr);
>> +		pud_t *pud = pud_page + pud_index(addr) + off;
>>  		pmd_t *pmd;
>>
>>  		next = (addr & PUD_MASK) + PUD_SIZE;
>
> Is there any chance of (pud_index(addr) + off) or (pmd_index(addr) + off)
> being bigger than 512?
>
> Looks like we need to change the loop from phys address to virtual
> address instead, to avoid the overflow.

That's a good point. I will take a look at it.

> Thanks
>
> Yinghai
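One possible shape of Yinghai's suggestion, as a sketch only (not the fix that eventually landed): iterate over virtual addresses so the index computation never steps past the end of a table page, and recover the physical address as `vaddr - off` when descending. This assumes the pgd-level caller also splits ranges at virtual PGDIR boundaries and that `ident_pmd_init()` is converted the same way:

```c
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	/* 'off' is the direct-map base for a kernel mapping, 0 for a
	 * plain identity mapping; the loop walks virtual addresses. */
	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
	unsigned long vaddr = addr + off, vend = end + off;
	unsigned long next;

	for (; vaddr < vend; vaddr = next) {
		/* Always 0..511 here: no '+ off', so no overflow. */
		pud_t *pud = pud_page + pud_index(vaddr);
		pmd_t *pmd;

		next = (vaddr & PUD_MASK) + PUD_SIZE;
		if (next > vend)
			next = vend;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, vaddr - off, next - off);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, vaddr - off, next - off);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	return 0;
}
```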
On Mon, Aug 1, 2016 at 5:36 PM, Rafael J. Wysocki <rjw@rjwysocki.net> wrote:
> On Monday, August 01, 2016 10:07:59 AM Thomas Garnier wrote:
>> Correctly setup the temporary mapping for hibernation. Previous
>> implementation assumed the address was aligned on the PGD level. With
>> KASLR memory randomization enabled, the address is randomized on the PUD
>> level. This change supports unaligned address up to PMD.
>
> This code is shared with kexec AFAICS, so it likely is better to push it
> through tip rather than through the PM tree.

Only the calling path via arch/x86/power/hibernate_64.c has
kernel_mapping = true; the other two paths,
arch/x86/boot/compressed/pagetable.c and
arch/x86/kernel/machine_kexec_64.c, both have kernel_mapping as false.

Maybe that path needs a simplified kernel_physical_mapping_init() instead?

Thanks

Yinghai
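For reference, the hibernation path Yinghai mentions is the sole caller passing `kernel_mapping = true`. An abridged sketch of what `set_up_temporary_mappings()` in arch/x86/power/hibernate_64.c looked like around this time (reconstructed for illustration; details may differ from the exact tree):

```c
/* Abridged sketch: rebuild a temporary direct mapping for resume. */
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping	= true,	/* map at __PAGE_OFFSET, not 1:1 */
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result, i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Re-map every physical range that was mapped at boot. */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;
		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	/* ...the new pgd is then handed to the restore code. */
	return 0;
}
```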
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796..ea1ebf1 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,16 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
+	int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
+
+	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(addr) + off;
 
 		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+			set_pmd(pmd, __pmd(addr | info->pmd_flag));
 	}
 }
 
@@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			  unsigned long addr, unsigned long end)
 {
 	unsigned long next;
+	int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+		pud_t *pud = pud_page + pud_index(addr) + off;
 		pmd_t *pmd;
 
 		next = (addr & PUD_MASK) + PUD_SIZE;
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
Correctly setup the temporary mapping for hibernation. Previous
implementation assumed the address was aligned on the PGD level. With
KASLR memory randomization enabled, the address is randomized on the PUD
level. This change supports unaligned address up to PMD.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/mm/ident_map.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
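To make the commit message's alignment terms concrete: under x86-64 4-level paging, a base randomized at PUD granularity has a nonzero `pud_index()` but zero `pmd_index()` and page-offset bits, i.e. it is PUD-aligned but no longer PGD-aligned, which breaks the old PGD-level assumption. A standalone sketch with a hypothetical randomized base (shift values match the kernel's PGDIR_SHIFT/PUD_SHIFT/PMD_SHIFT):

```c
#include <stdio.h>

#define PGDIR_SHIFT	39	/* pgd entry covers 512 GiB */
#define PUD_SHIFT	30	/* pud entry covers   1 GiB */
#define PMD_SHIFT	21	/* pmd entry covers   2 MiB */
#define IDX(v, s)	(((v) >> (s)) & 511)

int main(void)
{
	/* Hypothetical direct-map base randomized at PUD granularity. */
	unsigned long base = 0xffff884b00000000UL;

	/* Prints "pgd 272, pud 300, pmd 0": the nonzero pud index shows
	 * the base is not PGD-aligned, while pmd and lower bits are 0. */
	printf("pgd %lu, pud %lu, pmd %lu\n",
	       IDX(base, PGDIR_SHIFT), IDX(base, PUD_SHIFT),
	       IDX(base, PMD_SHIFT));
	return 0;
}
```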