Message ID | 20180720084453.116825-9-frankja@linux.ibm.com (mailing list archive)
---|---
State | New, archived
On Fri, 20 Jul 2018 09:44:48 +0100
Janosch Frank <frankja@linux.ibm.com> wrote:

> Similarly to the pte skey handling, where we set the storage key to
> the default key for each newly mapped pte, we also have to do that
> for huge pmds.
>
> With the PG_arch_1 flag we keep track of whether the area has already
> been cleared of its skeys.
>
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
>  arch/s390/mm/gmap.c        |  2 ++
>  arch/s390/mm/hugetlbpage.c | 24 ++++++++++++++++++++++++
>  2 files changed, 26 insertions(+)
>
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 6194a8a62aa0..409bc8f9d5b6 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -2554,6 +2554,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
>  {
>  	pmd_t *pmd = (pmd_t *)pte;
>  	unsigned long start, end;
> +	struct page *page = pmd_page(*pmd);
>
>  	/*
>  	 * The write check makes sure we do not set a key on shared
> @@ -2568,6 +2569,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
>  	start = pmd_val(*pmd) & HPAGE_MASK;
>  	end = start + HPAGE_SIZE - 1;
>  	__storage_key_init_range(start, end);
> +	set_bit(PG_arch_1, &page->flags);
>  	return 0;
>  }
>
> diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
> index e804090f4470..b0246c705a19 100644
> --- a/arch/s390/mm/hugetlbpage.c
> +++ b/arch/s390/mm/hugetlbpage.c
> @@ -123,6 +123,29 @@ static inline pte_t __rste_to_pte(unsigned long rste)
>  	return pte;
>  }
>
> +static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
> +{
> +	struct page *page;
> +	unsigned long size, paddr;
> +
> +	if (!mm_uses_skeys(mm) ||
> +	    rste & _SEGMENT_ENTRY_INVALID)
> +		return;
> +
> +	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
> +		page = pud_page(__pud(rste));
> +		size = PUD_SIZE;
> +		paddr = rste & PUD_MASK;
> +	} else {
> +		page = pmd_page(__pmd(rste));
> +		size = PMD_SIZE;
> +		paddr = rste & PMD_MASK;
> +	}
> +
> +	if (!test_and_set_bit(PG_arch_1, &page->flags))
> +		__storage_key_init_range(paddr, paddr + size - 1);
> +}
> +
>  void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
>  		     pte_t *ptep, pte_t pte)
>  {
> @@ -137,6 +160,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
>  		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
>  	else
>  		rste |= _SEGMENT_ENTRY_LARGE;
> +	clear_huge_pte_skeys(mm, rste);
>  	pte_val(*ptep) = rste;
>  }

That looks promising, but I'm missing the hunk for arch_clear_hugepage_flags()
to reset the PG_arch_1 bit again when the huge page is freed.
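For reference, a minimal sketch of what the requested change could look like, assuming the insertion point is the arch_clear_hugepage_flags() stub in arch/s390/include/asm/hugetlb.h (the exact placement is an assumption, not part of the posted patch):

/*
 * Sketch only: drop the "skeys initialized" marker when a huge page is
 * freed, so that clear_huge_pte_skeys() re-initializes the storage keys
 * the next time the page backs a mapping.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}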