@@ -470,7 +470,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
bytes = split_size;
if (tp->size + bytes > msh)
bytes = msh - tp->size;
- cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
+ dma_memory_read(&s->dev.dma, addr, tp->data + tp->size, bytes);
if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
memmove(tp->header, tp->data, hdr);
tp->size = sz;
@@ -485,7 +485,7 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
// context descriptor TSE is not set, while data descriptor TSE is set
DBGOUT(TXERR, "TCP segmentation error\n");
} else {
- cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
+ dma_memory_read(&s->dev.dma, addr, tp->data + tp->size, split_size);
tp->size += split_size;
}
@@ -501,7 +501,9 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
}
static uint32_t
-txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
+txdesc_writeback(E1000State *s,
+ target_phys_addr_t base,
+ struct e1000_tx_desc *dp)
{
uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
@@ -510,8 +512,9 @@ txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
dp->upper.data = cpu_to_le32(txd_upper);
- cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
- (void *)&dp->upper, sizeof(dp->upper));
+ dma_memory_write(&s->dev.dma,
+ base + ((char *)&dp->upper - (char *)dp),
+ (void *)&dp->upper, sizeof(dp->upper));
return E1000_ICR_TXDW;
}
@@ -530,14 +533,14 @@ start_xmit(E1000State *s)
while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
- cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
+ dma_memory_read(&s->dev.dma, base, (void *)&desc, sizeof(desc));
DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
(void *)(intptr_t)desc.buffer_addr, desc.lower.data,
desc.upper.data);
process_tx_desc(s, &desc);
- cause |= txdesc_writeback(base, &desc);
+ cause |= txdesc_writeback(s, base, &desc);
if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
s->mac_reg[TDH] = 0;
@@ -679,18 +682,19 @@ e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
}
base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
sizeof(desc) * s->mac_reg[RDH];
- cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
+ dma_memory_read(&s->dev.dma, base, (void *)&desc, sizeof(desc));
desc.special = vlan_special;
desc.status |= (vlan_status | E1000_RXD_STAT_DD);
if (desc.buffer_addr) {
- cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
- (void *)(buf + vlan_offset), size);
+ dma_memory_write(&s->dev.dma,
+ le64_to_cpu(desc.buffer_addr),
+ (void *)(buf + vlan_offset), size);
desc.length = cpu_to_le16(size + fcs_len(s));
desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
} else { // as per intel docs; skip descriptors with null buf addr
DBGOUT(RX, "Null RX descriptor!!\n");
}
- cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));
+ dma_memory_write(&s->dev.dma, base, (void *)&desc, sizeof(desc));
if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
s->mac_reg[RDH] = 0;