@@ -450,9 +450,8 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
     uint64_t num_dirty = 0;
     unsigned long *dest = rb->bmap;
 
-    /* start address and length is aligned at the start of a word? */
-    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == start_global &&
-        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
+    /* start address is aligned at the start of a word? */
+    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == start_global) {
         int k;
         int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
         unsigned long * const *src;
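For context, here is a minimal sketch of the word-at-a-time fast path
that this condition guards; it is not QEMU's actual code, and the helper
name sync_dirty_words() and its parameters are illustrative assumptions.
The point is that once start_global is word aligned, dest[k] and the
k-th long of the source bitmap describe the same group of BITS_PER_LONG
pages, so whole longs can be OR-ed together:

/*
 * Illustrative sketch only: sync_dirty_words() is a made-up helper,
 * not QEMU's API. It mimics what the fast path does once the start
 * address is word aligned: dest[k] and src[k] cover the same pages.
 */
#include <stdint.h>

/*
 * Merge 'nr' longs of a source dirty bitmap into the destination
 * bitmap, returning how many pages newly became dirty.
 */
uint64_t sync_dirty_words(unsigned long *dest,
                          const unsigned long *src, int nr)
{
    uint64_t num_dirty = 0;

    for (int k = 0; k < nr; k++) {
        unsigned long bits = src[k];

        if (bits) {
            /* Count only bits that were not already set in dest. */
            unsigned long new_dirty = ~dest[k] & bits;

            dest[k] |= bits;
            /* GCC/Clang builtin; QEMU uses its own popcount helper. */
            num_dirty += __builtin_popcountl(new_dirty);
        }
    }
    return num_dirty;
}

The upstream fast path additionally clears each source long atomically
as it reads it, but the key property is the same: the destination words
must line up with the source words, which depends on start_global, not
on length.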
In commit aa777e297c84 ("cpu_physical_memory_sync_dirty_bitmap: Another
alignment fix"), the RAMBlock length was required to be aligned to word
pages before the fast dirty sync path could be taken. The reason given
was: "If the RAMBlock is less than 64 pages in length that long can
contain bits representing two different RAMBlocks, but the code will
update the bmap belonging to the 1st RAMBlock only while having updated
the total dirty page count for both."

This was true before commit 801110ab22be ("find_ram_offset: Align
ram_addr_t allocation on long boundaries"), which aligns ram_addr_t
allocation on long boundaries, so nowadays we won't "update the total
dirty page count for both". By removing the alignment constraint on
length in the fast path, we can always use the fast dirty sync path
whenever start_global is aligned to a word page.

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 include/exec/ram_addr.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
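For completeness, the long-boundary guarantee from commit 801110ab22be
can be pictured with the sketch below. It assumes a 4 KiB page size and
a made-up helper align_ram_offset(); the real logic lives in
find_ram_offset(). Because every RAMBlock starts at a multiple of
BITS_PER_LONG pages, one long of the dirty bitmap never covers pages of
two different RAMBlocks:

/*
 * Illustrative sketch only: PAGE_BITS_SKETCH, align_ram_offset() and
 * the simplified ram_addr_t typedef are assumptions for this example,
 * not QEMU's find_ram_offset() implementation.
 */
#include <limits.h>
#include <stdint.h>

#define PAGE_BITS_SKETCH 12 /* assume 4 KiB target pages */
#define BITS_PER_LONG_SKETCH (sizeof(unsigned long) * CHAR_BIT)

typedef uint64_t ram_addr_t;

/*
 * Round a candidate RAMBlock offset up to the next boundary covered
 * by one full long of dirty bitmap bits, i.e. to a multiple of
 * BITS_PER_LONG pages.
 */
ram_addr_t align_ram_offset(ram_addr_t offset)
{
    ram_addr_t align =
        (ram_addr_t)BITS_PER_LONG_SKETCH << PAGE_BITS_SKETCH;

    return (offset + align - 1) & ~(align - 1);
}

With that guarantee, the tail long of a block's bitmap may be partly
unused, but it is never shared with the next RAMBlock, which is why the
length alignment check became redundant.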