Message ID | 1454059965-23402-5-git-send-email-a.rigo@virtualopensystems.com (mailing list archive) |
---|---|
State | New, archived |
Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> Attempting to simplify the helper_*_st_name helpers, wrap the code
> relating to a RAM access into an inline function.
>
> Based on this work, Alex proposed the following patch series
> https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg01136.html
> that reduces code duplication of the softmmu_helpers.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  softmmu_template.h | 110 +++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 68 insertions(+), 42 deletions(-)
>
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 3d388ec..6279437 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -416,13 +416,46 @@ static inline void glue(helper_le_st_name, _do_mmio_access)(CPUArchState *env,
>      glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
>  }
>
> +static inline void glue(helper_le_st_name, _do_ram_access)(CPUArchState *env,
> +                                                           DATA_TYPE val,
> +                                                           target_ulong addr,
> +                                                           TCGMemOpIdx oi,
> +                                                           unsigned mmu_idx,
> +                                                           int index,
> +                                                           uintptr_t retaddr)
> +{
> +    uintptr_t haddr;
> +
> +    /* Handle slow unaligned access (it spans two pages or IO). */
> +    if (DATA_SIZE > 1
> +        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
> +                    >= TARGET_PAGE_SIZE)) {
> +        glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
> +                                                retaddr);
> +        return;
> +    }
> +
> +    /* Handle aligned access or unaligned access in the same page. */
> +    if ((addr & (DATA_SIZE - 1)) != 0
> +        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
> +    }
> +
> +    haddr = addr + env->tlb_table[mmu_idx][index].addend;
> +#if DATA_SIZE == 1
> +    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
> +#else
> +    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
> +#endif
> +}
> +
>  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                         TCGMemOpIdx oi, uintptr_t retaddr)
>  {
>      unsigned mmu_idx = get_mmuidx(oi);
>      int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
> -    uintptr_t haddr;
>
>      /* Adjust the given return address. */
>      retaddr -= GETPC_ADJ;
> @@ -448,28 +481,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          return;
>      }
>
> -    /* Handle slow unaligned access (it spans two pages or IO). */
> -    if (DATA_SIZE > 1
> -        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
> -                    >= TARGET_PAGE_SIZE)) {
> -        glue(helper_le_st_name, _do_unl_access)(env, val, addr, mmu_idx,
> -                                                oi, retaddr);
> -        return;
> -    }
> -
> -    /* Handle aligned access or unaligned access in the same page. */
> -    if ((addr & (DATA_SIZE - 1)) != 0
> -        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> -                             mmu_idx, retaddr);
> -    }
> -
> -    haddr = addr + env->tlb_table[mmu_idx][index].addend;
> -#if DATA_SIZE == 1
> -    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
> -#else
> -    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
> -#endif
> +    glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,
> +                                            retaddr);
>  }
>
>  #if DATA_SIZE > 1
> @@ -519,13 +532,42 @@ static inline void glue(helper_be_st_name, _do_mmio_access)(CPUArchState *env,
>      glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
>  }
>
> +static inline void glue(helper_be_st_name, _do_ram_access)(CPUArchState *env,
> +                                                           DATA_TYPE val,
> +                                                           target_ulong addr,
> +                                                           TCGMemOpIdx oi,
> +                                                           unsigned mmu_idx,
> +                                                           int index,
> +                                                           uintptr_t retaddr)
> +{
> +    uintptr_t haddr;
> +
> +    /* Handle slow unaligned access (it spans two pages or IO). */
> +    if (DATA_SIZE > 1
> +        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
> +                    >= TARGET_PAGE_SIZE)) {
> +        glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
> +                                                retaddr);
> +        return;
> +    }
> +
> +    /* Handle aligned access or unaligned access in the same page. */
> +    if ((addr & (DATA_SIZE - 1)) != 0
> +        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
> +    }
> +
> +    haddr = addr + env->tlb_table[mmu_idx][index].addend;
> +    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
> +}
> +
>  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                         TCGMemOpIdx oi, uintptr_t retaddr)
>  {
>      unsigned mmu_idx = get_mmuidx(oi);
>      int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
> -    uintptr_t haddr;
>
>      /* Adjust the given return address. */
>      retaddr -= GETPC_ADJ;
> @@ -551,24 +593,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          return;
>      }
>
> -    /* Handle slow unaligned access (it spans two pages or IO). */
> -    if (DATA_SIZE > 1
> -        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
> -                    >= TARGET_PAGE_SIZE)) {
> -        glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
> -                                                retaddr);
> -        return;
> -    }
> -
> -    /* Handle aligned access or unaligned access in the same page. */
> -    if ((addr & (DATA_SIZE - 1)) != 0
> -        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> -                             mmu_idx, retaddr);
> -    }
> -
> -    haddr = addr + env->tlb_table[mmu_idx][index].addend;
> -    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
> +    glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,
> +                                            retaddr);
>  }
>  #endif /* DATA_SIZE > 1 */

Same comments as before: there is more duplication that could be removed.
However:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

--
Alex Bennée
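Alex's point about residual duplication is visible above: the LE and BE `_do_ram_access` bodies differ only in the final store. Below is a minimal sketch of one way to fold them together by pushing the endianness into a parameter. It is an illustration only, not code from the series Alex links to; the `helper_st_name` spelling and the `little_endian` argument (including its propagation into `_do_unl_access`) are hypothetical, and the fragment reuses the template macros of softmmu_template.h (`glue`, `DATA_SIZE`, `SUFFIX`, `get_memop`, ...), so it is meant to be read in that file's context rather than compiled standalone.

```c
/* Hypothetical unified variant: one body for both endiannesses. */
static inline void glue(helper_st_name, _do_ram_access)(CPUArchState *env,
                                                        DATA_TYPE val,
                                                        target_ulong addr,
                                                        TCGMemOpIdx oi,
                                                        unsigned mmu_idx,
                                                        int index,
                                                        uintptr_t retaddr,
                                                        bool little_endian)
{
    uintptr_t haddr;

    /* Slow path for accesses spanning two pages (or hitting IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        glue(helper_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
                                             retaddr, little_endian);
        return;
    }

    /* Fault if the memop requires alignment and the address lacks it. */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    if (little_endian) {
        glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
    } else {
        glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
    }
#endif
}
```

Because the function is inline and `little_endian` is a compile-time constant at every call site, the branch on it folds away, so this kind of unification need not cost anything at run time.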
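For reference, the page-crossing test that routes a store onto the slow unaligned path can be exercised in isolation. The constants below are hypothetical stand-ins for a 4 KiB target page, as is the `spans_two_pages` wrapper; only the arithmetic mirrors the patch:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for a 4 KiB target page. */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)            /* 4096 */
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

static bool spans_two_pages(uint64_t addr, unsigned data_size)
{
    /* addr & ~TARGET_PAGE_MASK is the offset of the first byte within its
     * page; adding data_size - 1 gives the offset of the last byte.  If
     * that reaches TARGET_PAGE_SIZE, the access crosses into the next
     * page and must take the slow unaligned path. */
    return (addr & ~TARGET_PAGE_MASK) + data_size - 1 >= TARGET_PAGE_SIZE;
}

int main(void)
{
    printf("%d\n", spans_two_pages(0x1ffc, 4)); /* 0: bytes 0xffc..0xfff */
    printf("%d\n", spans_two_pages(0x1ffd, 4)); /* 1: last byte at 0x1000 */
    printf("%d\n", spans_two_pages(0x2000, 8)); /* 0: fully within one page */
    return 0;
}
```

The `- 1` matters: the condition tests the page offset of the *last* byte of the access, so a store whose final byte lands exactly on the last byte of a page (0xffc..0xfff above) still takes the fast path.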
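Finally, every identifier of the form `glue(...)` in the diff is built by preprocessor token pasting. Here is a self-contained demonstration of the idiom; the `SUFFIX` value and the `stl_example` function are hypothetical, while the two-level `glue`/`xglue` definition follows the one in QEMU's headers:

```c
#include <stdio.h>

/* Two levels so that macro arguments (e.g. SUFFIX) are expanded before
 * ## pastes the tokens together. */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

#define SUFFIX l    /* hypothetical template parameter, as for 32-bit ops */

static void stl_example(void)
{
    puts("stl_example called");
}

int main(void)
{
    /* glue(glue(st, SUFFIX), _example) -> glue(stl, _example) -> stl_example */
    glue(glue(st, SUFFIX), _example)();
    return 0;
}
```

This is how softmmu_template.h stamps out a byte/word/long/quad store helper from a single body: the file is included once per access size, with `SUFFIX`, `DATA_TYPE`, `DATA_SIZE` and friends redefined before each inclusion.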