@@ -1200,38 +1200,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
-#ifdef TARGET_WORDS_BIGENDIAN
-#define NEED_BE_BSWAP 0
-#define NEED_LE_BSWAP 1
-#else
-#define NEED_BE_BSWAP 1
-#define NEED_LE_BSWAP 0
-#endif
-
-/*
- * Byte Swap Helper
- *
- * This should all dead code away depending on the build host and
- * access type.
- */
-
-static inline uint64_t handle_bswap(uint64_t val, MemOp op)
-{
- if ((memop_big_endian(op) && NEED_BE_BSWAP) ||
- (!memop_big_endian(op) && NEED_LE_BSWAP)) {
- switch (op & MO_SIZE) {
- case MO_8: return val;
- case MO_16: return bswap16(val);
- case MO_32: return bswap32(val);
- case MO_64: return bswap64(val);
- default:
- g_assert_not_reached();
- }
- } else {
- return val;
- }
-}
-
/*
* Load Helpers
*
@@ -1306,10 +1274,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
}
}
- /* FIXME: io_readx ignores MO_BSWAP. */
- res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
- mmu_idx, addr, retaddr, access_type, op);
- return handle_bswap(res, op);
+ return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+ mmu_idx, addr, retaddr, access_type, op);
}
/* Handle slow unaligned access (it spans two pages or IO). */
@@ -1552,10 +1518,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
}
}
- /* FIXME: io_writex ignores MO_BSWAP. */
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
- handle_bswap(val, op),
- addr, retaddr, op);
+ val, addr, retaddr, op);
return;
}
@@ -542,16 +542,15 @@ void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
val = pci_get_byte(buf);
break;
case 2:
- val = cpu_to_le16(pci_get_word(buf));
+ val = pci_get_word(buf);
break;
case 4:
- val = cpu_to_le32(pci_get_long(buf));
+ val = pci_get_long(buf);
break;
default:
/* As length is under guest control, handle illegal values. */
return;
}
- /* FIXME: memory_region_dispatch_write ignores MO_BSWAP. */
memory_region_dispatch_write(mr, addr, val, size_memop(len),
MEMTXATTRS_UNSPECIFIED);
}
@@ -576,7 +575,6 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
/* Make sure caller aligned buf properly */
assert(!(((uintptr_t)buf) & (len - 1)));
- /* FIXME: memory_region_dispatch_read ignores MO_BSWAP. */
memory_region_dispatch_read(mr, addr, &val, size_memop(len),
MEMTXATTRS_UNSPECIFIED);
switch (len) {
@@ -584,10 +582,10 @@ virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
pci_set_byte(buf, val);
break;
case 2:
- pci_set_word(buf, le16_to_cpu(val));
+ pci_set_word(buf, val);
break;
case 4:
- pci_set_long(buf, le32_to_cpu(val));
+ pci_set_long(buf, val);
break;
default:
/* As length is under guest control, handle illegal values. */
@@ -343,32 +343,23 @@ static void flatview_simplify(FlatView *view)
}
}
-static bool memory_region_wrong_endianness(MemoryRegion *mr)
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
-#ifdef TARGET_WORDS_BIGENDIAN
- return mr->ops->endianness == MO_LE;
-#else
- return mr->ops->endianness == MO_BE;
-#endif
-}
-
-static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
-{
- if (memory_region_wrong_endianness(mr)) {
- switch (size) {
- case 1:
+ if ((op & MO_BSWAP) != mr->ops->endianness) {
+ switch (op & MO_SIZE) {
+ case MO_8:
break;
- case 2:
+ case MO_16:
*data = bswap16(*data);
break;
- case 4:
+ case MO_32:
*data = bswap32(*data);
break;
- case 8:
+ case MO_64:
*data = bswap64(*data);
break;
default:
- abort();
+ g_assert_not_reached();
}
}
}
@@ -1446,7 +1437,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
}
r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
- adjust_endianness(mr, pval, size);
+ adjust_endianness(mr, pval, op);
return r;
}
@@ -1489,7 +1480,7 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
return MEMTX_DECODE_ERROR;
}
- adjust_endianness(mr, &data, size);
+ adjust_endianness(mr, &data, op);
if ((!kvm_eventfds_enabled()) &&
memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
@@ -2335,7 +2326,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
}
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, size_memop(size));
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -2370,7 +2361,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned i;
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, size_memop(size));
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -37,17 +37,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
- /* FIXME: memory_region_dispatch_read ignores MO_BSWAP. */
r = memory_region_dispatch_read(mr, addr1, &val, MO_32 | endian, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap32(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap32(val);
- }
-#endif
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -113,17 +103,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
- /* FIXME: memory_region_dispatch_read ignores MO_BSWAP. */
r = memory_region_dispatch_read(mr, addr1, &val, MO_64 | endian, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap64(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap64(val);
- }
-#endif
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -223,17 +203,7 @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
release_lock |= prepare_mmio_access(mr);
/* I/O case */
- /* FIXME: memory_region_dispatch_read ignores MO_BSWAP. */
r = memory_region_dispatch_read(mr, addr1, &val, MO_16 | endian, attrs);
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap16(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap16(val);
- }
-#endif
} else {
/* RAM case */
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -299,7 +269,6 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
-
r = memory_region_dispatch_write(mr, addr1, val, MO_32 | MO_TE, attrs);
} else {
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
@@ -336,17 +305,6 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap32(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap32(val);
- }
-#endif
- /* FIXME: memory_region_dispatch_write ignores MO_BSWAP. */
r = memory_region_dispatch_write(mr, addr1, val, MO_32 | endian, attrs);
} else {
/* RAM case */
@@ -442,17 +400,6 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 2 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap16(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap16(val);
- }
-#endif
- /* FIXME: memory_region_dispatch_write ignores MO_BSWAP. */
r = memory_region_dispatch_write(mr, addr1, val, MO_16 | endian, attrs);
} else {
/* RAM case */
@@ -516,17 +463,6 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
mr = TRANSLATE(addr, &addr1, &l, true, attrs);
if (l < 8 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
-
-#if defined(TARGET_WORDS_BIGENDIAN)
- if (endian == MO_LE) {
- val = bswap64(val);
- }
-#else
- if (endian == MO_BE) {
- val = bswap64(val);
- }
-#endif
- /* FIXME: memory_region_dispatch_write ignores MO_BSWAP. */
r = memory_region_dispatch_write(mr, addr1, val, MO_64 | endian, attrs);
} else {
/* RAM case */
Now that MemOp has been pushed down into the memory API, and callers
are encoding endianness, we can collapse byte swaps along the I/O path
into the accelerator- and target-independent adjust_endianness.

Collapsing byte swaps along the I/O path enables additional endian
inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with redundant
byte swaps cancelling out.

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Tony Nguyen <tony.nguyen@bt.com>
---
 accel/tcg/cputlb.c     | 42 +++------------------------------
 hw/virtio/virtio-pci.c | 10 ++++----
 memory.c               | 33 ++++++++++----------------
 memory_ldst.inc.c      | 64 --------------------------------------------------
 4 files changed, 19 insertions(+), 130 deletions(-)

--
1.8.3.1
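As an illustration of why the per-callsite swaps can go: with endianness
carried in the MemOp, one comparison against the device's declared
endianness decides whether to swap, and two mismatched hops swap twice
and cancel. The sketch below is standalone demonstration code, not part
of this series; the enum values and helper name are assumptions chosen
to mirror the shape of adjust_endianness().

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the MemOp endian flag and a device's endianness. */
    typedef enum { END_LE = 0, END_BE = 1 } Endian;

    /* Swap only when the access endianness differs from the device's,
     * mirroring the single check in adjust_endianness(). */
    static uint16_t adjust16(uint16_t data, Endian access, Endian device)
    {
        if (access != device) {
            data = (uint16_t)((data >> 8) | (data << 8)); /* bswap16 */
        }
        return data;
    }

    int main(void)
    {
        uint16_t v = 0x1234;

        /* Matching endianness: value passes through untouched. */
        printf("0x%04x\n", adjust16(v, END_LE, END_LE));  /* 0x1234 */

        /* One mismatch: a single swap. */
        printf("0x%04x\n", adjust16(v, END_BE, END_LE));  /* 0x3412 */

        /* Two mismatched hops: the swaps cancel, which is what lets
         * redundant byte swaps along the I/O path collapse away. */
        printf("0x%04x\n", adjust16(adjust16(v, END_BE, END_LE),
                                    END_LE, END_BE));     /* 0x1234 */
        return 0;
    }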