Message ID: 1453889591-30968-10-git-send-email-david@gibson.dropbear.id.au
State: New, archived
On Wed, 2016-01-27 at 21:13 +1100, David Gibson wrote:
> h_enter() in the spapr code needs to know the page size of the HPTE
> it's about to insert.  Unlike other paths that do this, it doesn't
> have access to the SLB, so at the moment it determines this with some
> open-coded tests which assume POWER7 or POWER8 page size encodings.
> 
> To make this more flexible add ppc_hash64_hpte_page_shift_noslb() to
> determine both the "base" page size per segment, and the individual
> effective page size from an HPTE alone.
> 
> This means that the spapr code should now be able to handle any page
> size listed in the env->sps table.
> 
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

> ---
>  hw/ppc/spapr_hcall.c    | 25 ++++++-------------------
>  target-ppc/mmu-hash64.c | 35 +++++++++++++++++++++++++++++++++++
>  target-ppc/mmu-hash64.h |  3 +++
>  3 files changed, 44 insertions(+), 19 deletions(-)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index dedc7e0..a535c73 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -72,31 +72,18 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     target_ulong pte_index = args[1];
     target_ulong pteh = args[2];
     target_ulong ptel = args[3];
-    target_ulong page_shift = 12;
+    unsigned apshift, spshift;
     target_ulong raddr;
     target_ulong index;
     uint64_t token;
 
-    /* only handle 4k and 16M pages for now */
-    if (pteh & HPTE64_V_LARGE) {
-#if 0 /* We don't support 64k pages yet */
-        if ((ptel & 0xf000) == 0x1000) {
-            /* 64k page */
-        } else
-#endif
-        if ((ptel & 0xff000) == 0) {
-            /* 16M page */
-            page_shift = 24;
-            /* lowest AVA bit must be 0 for 16M pages */
-            if (pteh & 0x80) {
-                return H_PARAMETER;
-            }
-        } else {
-            return H_PARAMETER;
-        }
+    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
+    if (!apshift) {
+        /* Bad page size encoding */
+        return H_PARAMETER;
     }
 
-    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);
+    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
 
     if (is_ram_address(spapr, raddr)) {
         /* Regular RAM - should have WIMG=0010 */
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 3284776..19ee942 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -512,6 +512,41 @@ static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
     return 0; /* Bad page size encoding */
 }
 
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+                                          uint64_t pte0, uint64_t pte1,
+                                          unsigned *seg_page_shift)
+{
+    CPUPPCState *env = &cpu->env;
+    int i;
+
+    if (!(pte0 & HPTE64_V_LARGE)) {
+        *seg_page_shift = 12;
+        return 12;
+    }
+
+    /*
+     * The encodings in env->sps need to be carefully chosen so that
+     * this gives an unambiguous result.
+     */
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+        unsigned shift;
+
+        if (!sps->page_shift) {
+            break;
+        }
+
+        shift = hpte_page_shift(sps, pte0, pte1);
+        if (shift) {
+            *seg_page_shift = sps->page_shift;
+            return shift;
+        }
+    }
+
+    *seg_page_shift = 0;
+    return 0;
+}
+
 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                 int rwx, int mmu_idx)
 {
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 293a951..34cf975 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -16,6 +16,9 @@ void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte_index,
                                target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+                                          uint64_t pte0, uint64_t pte1,
+                                          unsigned *seg_page_shift);
 #endif
 
 /*
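On the h_enter() side, the real address is now aligned using whatever page shift the helper returns, rather than a hard-coded 12 or 24. A minimal, standalone sketch of that masking arithmetic (the HPTE64_R_RPN value below is assumed and defined locally so the example builds outside the QEMU tree):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Assumed value of the RPN field mask, defined here only for illustration. */
#define HPTE64_R_RPN  0x0ffffffffffff000ULL

/* Same expression as the patched raddr line in h_enter(). */
static uint64_t hpte_raddr(uint64_t ptel, unsigned apshift)
{
    return (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
}

int main(void)
{
    uint64_t ptel = 0x12345190ULL;   /* RPN bits plus low flag bits */

    /* A 4K page only loses the low flag bits; a 16M page is additionally
     * aligned down to a 16M boundary. */
    printf("4K : 0x%" PRIx64 "\n", hpte_raddr(ptel, 12));   /* 0x12345000 */
    printf("16M: 0x%" PRIx64 "\n", hpte_raddr(ptel, 24));   /* 0x12000000 */
    return 0;
}

Whatever page size the encoding selects, guest-supplied low-order bits in ptel therefore cannot leak into the real address that is then checked against RAM.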
h_enter() in the spapr code needs to know the page size of the HPTE it's
about to insert.  Unlike other paths that do this, it doesn't have access
to the SLB, so at the moment it determines this with some open-coded tests
which assume POWER7 or POWER8 page size encodings.

To make this more flexible add ppc_hash64_hpte_page_shift_noslb() to
determine both the "base" page size per segment, and the individual
effective page size from an HPTE alone.

This means that the spapr code should now be able to handle any page size
listed in the env->sps table.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 hw/ppc/spapr_hcall.c    | 25 ++++++-------------------
 target-ppc/mmu-hash64.c | 35 +++++++++++++++++++++++++++++++++++
 target-ppc/mmu-hash64.h |  3 +++
 3 files changed, 44 insertions(+), 19 deletions(-)
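To illustrate the shape of the lookup the commit message describes, here is a simplified, standalone sketch: a small-page HPTE is always treated as 4K, while a large-page HPTE is matched against each supported segment page size in turn, first match winning. The table contents, the HPTE_V_LARGE bit position and the lp_mask/lp_value matching below are hypothetical stand-ins for QEMU's env->sps data and hpte_page_shift() logic, not the real structures:

#include <stdint.h>
#include <stdio.h>

#define HPTE_V_LARGE  0x0000000000000004ULL   /* assumed position of the L bit */
#define MAX_SIZES     8

struct seg_page_size {
    unsigned page_shift;          /* base (segment) page size, 0 = unused entry */
    unsigned actual_shift;        /* actual page size encoded in the HPTE */
    uint64_t lp_mask, lp_value;   /* low RPN bits that identify that size */
};

/* Hypothetical table covering 4K and 16M pages only. */
static const struct seg_page_size sps[MAX_SIZES] = {
    { 12, 12, 0x00000, 0x0 },     /* 4K  base, 4K  actual */
    { 24, 24, 0xff000, 0x0 },     /* 16M base, 16M actual */
};

/* First-match lookup mirroring the loop added in mmu-hash64.c: return the
 * actual page shift and store the segment page shift, or return 0 for an
 * unrecognised encoding. */
static unsigned page_shift_noslb(uint64_t pte0, uint64_t pte1,
                                 unsigned *seg_page_shift)
{
    if (!(pte0 & HPTE_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;                /* small-page HPTEs are always 4K */
    }
    for (int i = 0; i < MAX_SIZES && sps[i].page_shift; i++) {
        /* Skip 4K entries: the L bit being set rules them out. */
        if (sps[i].actual_shift > 12 &&
            (pte1 & sps[i].lp_mask) == sps[i].lp_value) {
            *seg_page_shift = sps[i].page_shift;
            return sps[i].actual_shift;
        }
    }
    *seg_page_shift = 0;
    return 0;                     /* bad page size encoding */
}

int main(void)
{
    unsigned seg, shift;

    shift = page_shift_noslb(0, 0, &seg);
    printf("small HPTE: actual %u, segment %u\n", shift, seg);

    shift = page_shift_noslb(HPTE_V_LARGE, 0x01000000ULL, &seg);
    printf("large HPTE: actual %u, segment %u\n", shift, seg);
    return 0;
}

As the comment in the new function notes, a first-match search like this is only correct if the encodings in env->sps are chosen so that no two entries can claim the same HPTE bit pattern.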