@@ -162,6 +162,7 @@ static inline int page_reset_referenced(unsigned long addr)
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
struct page;
+struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);
@@ -175,8 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_ALLOC_PAGE
#if IS_ENABLED(CONFIG_PGSTE)
-int arch_make_page_accessible(struct page *page);
-#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+int arch_make_folio_accessible(struct folio *folio);
+#define arch_make_folio_accessible arch_make_folio_accessible
#endif
#define __PAGE_OFFSET 0x0UL
@@ -426,46 +426,58 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
EXPORT_SYMBOL_GPL(gmap_destroy_page);
/*
- * To be called with the page locked or with an extra reference! This will
- * prevent gmap_make_secure from touching the page concurrently. Having 2
- * parallel make_page_accessible is fine, as the UV calls will become a
- * no-op if the page is already exported.
+ * To be called with the folio locked or with an extra reference! This will
+ * prevent gmap_make_secure from touching the folio concurrently. Having 2
+ * parallel make_folio_accessible is fine, as the UV calls will become a
+ * no-op if the folio is already exported.
+ *
+ * Returns 0 on success or negative errno.
*/
-int arch_make_page_accessible(struct page *page)
+int arch_make_folio_accessible(struct folio *folio)
{
- int rc = 0;
+ unsigned long i, nr = folio_nr_pages(folio);
+ unsigned long pfn = folio_pfn(folio);
+ int err = 0;
/* Hugepage cannot be protected, so nothing to do */
- if (PageHuge(page))
+ if (folio_test_hugetlb(folio))
return 0;
/*
* PG_arch_1 is used in 3 places:
* 1. for kernel page tables during early boot
* 2. for storage keys of huge pages and KVM
- * 3. As an indication that this page might be secure. This can
+ * 3. As an indication that this folio might be secure. This can
* overindicate, e.g. we set the bit before calling
* convert_to_secure.
* As secure pages are never huge, all 3 variants can co-exists.
*/
- if (!test_bit(PG_arch_1, &page->flags))
+ if (!test_bit(PG_arch_1, &folio->flags))
return 0;
- rc = uv_pin_shared(page_to_phys(page));
- if (!rc) {
- clear_bit(PG_arch_1, &page->flags);
+ for (i = 0; i < nr; i++) {
+ err = uv_pin_shared((pfn + i) * PAGE_SIZE);
+ if (err)
+ break;
+ }
+ if (!err) {
+ clear_bit(PG_arch_1, &folio->flags);
return 0;
}
- rc = uv_convert_from_secure(page_to_phys(page));
- if (!rc) {
- clear_bit(PG_arch_1, &page->flags);
+ for (i = 0; i < nr; i++) {
+ err = uv_convert_from_secure((pfn + i) * PAGE_SIZE);
+ if (err)
+ break;
+ }
+ if (!err) {
+ clear_bit(PG_arch_1, &folio->flags);
return 0;
}
- return rc;
+ return err;
}
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
+EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
#endif
@@ -588,6 +588,7 @@ void do_secure_storage_access(struct pt_regs *regs)
struct vm_area_struct *vma;
struct mm_struct *mm;
struct page *page;
+ struct folio *folio;
struct gmap *gmap;
int rc;
@@ -643,17 +644,17 @@ void do_secure_storage_access(struct pt_regs *regs)
mmap_read_unlock(mm);
break;
}
- if (arch_make_page_accessible(page))
+ folio = page_folio(page);
+ if (arch_make_folio_accessible(folio))
send_sig(SIGSEGV, current, 0);
- put_page(page);
+ folio_put(folio);
mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
- page = phys_to_page(addr);
- if (unlikely(!try_get_page(page)))
- break;
- rc = arch_make_page_accessible(page);
- put_page(page);
+ folio = page_folio(phys_to_page(addr));
+ folio_get(folio);
+ rc = arch_make_folio_accessible(folio);
+ folio_put(folio);
if (rc)
BUG();
break;
@@ -2139,26 +2139,10 @@ static inline int folio_estimated_sharers(struct folio *folio)
return page_mapcount(folio_page(folio, 0));
}
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
-
-#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+#ifndef arch_make_folio_accessible
static inline int arch_make_folio_accessible(struct folio *folio)
{
- int ret;
- long i, nr = folio_nr_pages(folio);
-
- for (i = 0; i < nr; i++) {
- ret = arch_make_page_accessible(folio_page(folio, i));
- if (ret)
- break;
- }
-
- return ret;
+ return 0;
}
#endif
With all users now using arch_make_folio_accessible(), move the loop over
each page from common code into the only implementation.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/s390/include/asm/page.h |  5 ++--
 arch/s390/kernel/uv.c        | 46 +++++++++++++++++++++++-------------
 arch/s390/mm/fault.c         | 15 ++++++------
 include/linux/mm.h           | 20 ++--------------
 4 files changed, 42 insertions(+), 44 deletions(-)