[2/3] Check continuous dirty and non-dirty pages.

Message ID 4B6FE5E1.1020200@lab.ntt.co.jp (mailing list archive)
State: New, archived

Commit Message

OHMURA Kei Feb. 8, 2010, 10:22 a.m. UTC

Patch

diff --git a/exec.c b/exec.c
index ade09cb..5770281 100644
--- a/exec.c
+++ b/exec.c
@@ -119,6 +119,7 @@  uint8_t *code_gen_ptr;
 #if !defined(CONFIG_USER_ONLY)
 int phys_ram_fd;
 uint8_t *phys_ram_dirty;
+int *phys_ram_dirty_by_word;
 uint8_t *bios_mem;
 static int in_migration;
 
@@ -1843,7 +1844,7 @@  static void tlb_protect_code(ram_addr_t ram_addr)
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr)
 {
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
+    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
 }
 
 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
@@ -1858,14 +1859,83 @@  static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
     }
 }
 
+int cpu_physical_memory_get_dirty_range(ram_addr_t start, ram_addr_t end,
+                                        int dirty_flags)
+{
+    static unsigned long mask = 0;
+    static int cached_dirty_flags = 0;
+    uint8_t *p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
+    int *p_by_word = phys_ram_dirty_by_word +
+        (start >> TARGET_PAGE_BITS) / TARGET_LONG_BITS;
+
+    /*
+     * Building the mask on every call is too slow,
+     * so cache it between calls.
+     */
+    if (dirty_flags != cached_dirty_flags) {
+        cached_dirty_flags = dirty_flags;
+        mask = unroll_flags_to_ul(dirty_flags);
+    }
+
+    /*
+     * We can report dirty pages very quickly
+     * when many contiguous pages are dirty.
+     */
+    if ((((start >> TARGET_PAGE_BITS) & (TARGET_LONG_BITS - 1)) == 0) &&
+        ((end - start) >> TARGET_PAGE_BITS) >= TARGET_LONG_BITS &&
+        *p_by_word == TARGET_LONG_BITS)
+        return TARGET_LONG_BITS;
+
+    if ((((start >> TARGET_PAGE_BITS) & (TARGET_LONG_SIZE - 1)) == 0) &&
+        ((end - start) >> TARGET_PAGE_BITS) >= TARGET_LONG_SIZE &&
+        (*(unsigned long *)p & mask) == mask)
+        return TARGET_LONG_SIZE;
+
+    return (cpu_physical_memory_get_dirty(start, dirty_flags) == dirty_flags);
+}
+
+int cpu_physical_memory_get_non_dirty_range(ram_addr_t start, ram_addr_t end,
+                                            int dirty_flags)
+{
+    static unsigned long mask = 0;
+    static int cached_dirty_flags = 0;
+    uint8_t *p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
+    int *p_by_word = phys_ram_dirty_by_word +
+        (start >> TARGET_PAGE_BITS) / TARGET_LONG_BITS;
+
+    /*
+     * Building the mask on every call is too slow,
+     * so cache it between calls.
+     */
+    if (dirty_flags != cached_dirty_flags) {
+        cached_dirty_flags = dirty_flags;
+        mask = unroll_flags_to_ul(dirty_flags);
+    }
+
+    /*
+     * We can skip non-dirty pages very quickly
+     * when many contiguous pages are clean.
+     */
+    if ((((start >> TARGET_PAGE_BITS) & (TARGET_LONG_BITS - 1)) == 0) &&
+        ((end - start) >> TARGET_PAGE_BITS) >= TARGET_LONG_BITS &&
+        *p_by_word == 0)
+        return TARGET_LONG_BITS;
+
+    if ((((start >> TARGET_PAGE_BITS) & (TARGET_LONG_SIZE - 1)) == 0) &&
+        ((end - start) >> TARGET_PAGE_BITS) >= TARGET_LONG_SIZE &&
+        (*(unsigned long *)p & mask) == 0)
+        return TARGET_LONG_SIZE;
+
+    return (cpu_physical_memory_get_dirty(start, dirty_flags) == 0);
+}
+
 /* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
 {
     CPUState *env;
     unsigned long length, start1;
-    int i, mask, len;
-    uint8_t *p;
+    int i;
 
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -1873,11 +1943,7 @@  void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     length = end - start;
     if (length == 0)
         return;
-    len = length >> TARGET_PAGE_BITS;
-    mask = ~dirty_flags;
-    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
-    for(i = 0; i < len; i++)
-        p[i] &= mask;
+    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
@@ -2535,6 +2601,7 @@  extern const char *mem_path;
 ram_addr_t qemu_ram_alloc(ram_addr_t size)
 {
     RAMBlock *new_block;
+    int i, *p;
 
     size = TARGET_PAGE_ALIGN(size);
     new_block = qemu_malloc(sizeof(*new_block));
@@ -2564,6 +2631,14 @@  ram_addr_t qemu_ram_alloc(ram_addr_t size)
     memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);
 
+    phys_ram_dirty_by_word = qemu_realloc(phys_ram_dirty_by_word,
+        TARGET_LONG_ALIGN((last_ram_offset + size) >> TARGET_PAGE_BITS) *
+        sizeof(*phys_ram_dirty_by_word));
+    p = phys_ram_dirty_by_word +
+        TARGET_LONG_ALIGN(last_ram_offset >> TARGET_PAGE_BITS);
+    for (i = 0; i < TARGET_LONG_ALIGN(size >> TARGET_PAGE_BITS); i++)
+        p[i] = TARGET_LONG_BITS;
+
     last_ram_offset += size;
 
     if (kvm_enabled())
@@ -2729,16 +2804,16 @@  static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 1);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stb_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
@@ -2749,16 +2824,16 @@  static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 2);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stw_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
@@ -2769,16 +2844,16 @@  static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 4);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stl_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
@@ -3230,8 +3305,8 @@  void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                     /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */
-                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                        (0xff & ~CODE_DIRTY_FLAG);
+                    cpu_physical_memory_set_dirty_flags(
+                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
 		/* qemu doesn't execute guest code directly, but kvm does
 		   therefore flush instruction caches */
@@ -3444,8 +3519,8 @@  void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                     /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */
-                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                        (0xff & ~CODE_DIRTY_FLAG);
+                    cpu_physical_memory_set_dirty_flags(
+                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
                 addr1 += l;
                 access_len -= l;
@@ -3581,8 +3656,8 @@  void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
                 /* invalidate code */
                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                 /* set dirty bit */
-                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                    (0xff & ~CODE_DIRTY_FLAG);
+                cpu_physical_memory_set_dirty_flags(
+                    addr1, (0xff & ~CODE_DIRTY_FLAG));
             }
         }
     }
@@ -3650,8 +3725,8 @@  void stl_phys(target_phys_addr_t addr, uint32_t val)
             /* invalidate code */
             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
             /* set dirty bit */
-            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                (0xff & ~CODE_DIRTY_FLAG);
+            cpu_physical_memory_set_dirty_flags(addr1,
+                (0xff & ~CODE_DIRTY_FLAG));
         }
     }
 }
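
The mechanics are worth spelling out, since the commit message is empty. phys_ram_dirty keeps one flag byte per page, and the new phys_ram_dirty_by_word keeps, for every group of TARGET_LONG_BITS pages, a count of how many are dirty (qemu_ram_alloc initializes each counter to TARGET_LONG_BITS because freshly allocated RAM starts fully dirty). The two new range checks then have three tiers: if the counter says the whole group is dirty (or clean), answer for TARGET_LONG_BITS pages at once; otherwise compare one word of flag bytes against a byte-replicated mask to answer for TARGET_LONG_SIZE pages; otherwise fall back to a single-page test. The helpers used but not defined here (unroll_flags_to_ul(), cpu_physical_memory_get_dirty_flags(), cpu_physical_memory_set_dirty_flags(), cpu_physical_memory_mask_dirty_range()) are presumably introduced earlier in the series. The standalone sketch below illustrates the idea under simplified assumptions; dirty_run() and unroll_flags() are hypothetical stand-ins, not the series' API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for QEMU's TARGET_LONG_SIZE / TARGET_LONG_BITS. */
#define LONG_SIZE  (sizeof(unsigned long))   /* flag bytes tested per word */
#define LONG_BITS  (LONG_SIZE * 8)           /* pages covered per counter  */
#define NPAGES     256

/* One dirty-flag byte per page; the union keeps word-wide reads aligned. */
static union {
    uint8_t b[NPAGES];
    unsigned long w[NPAGES / LONG_SIZE];
} dirty;

/* Dirty-page count per group of LONG_BITS pages -- the role played by
 * phys_ram_dirty_by_word in the patch. */
static int dirty_by_word[NPAGES / LONG_BITS];

/* Replicate an 8-bit flag into every byte of a word; this is the
 * behaviour the series needs from unroll_flags_to_ul(), not its code. */
static unsigned long unroll_flags(uint8_t flags)
{
    unsigned long mask = flags;
    size_t shift;

    for (shift = 8; shift < LONG_BITS; shift *= 2) {
        mask |= mask << shift;
    }
    return mask;
}

/* How many pages starting at 'page' are dirty for 'flags'?  Mirrors the
 * three tiers of cpu_physical_memory_get_dirty_range(). */
static size_t dirty_run(size_t page, uint8_t flags)
{
    unsigned long mask = unroll_flags(flags);

    /* Tier 1: the per-word counter says the whole group is dirty. */
    if (page % LONG_BITS == 0 &&
        dirty_by_word[page / LONG_BITS] == (int)LONG_BITS) {
        return LONG_BITS;
    }
    /* Tier 2: one word-wide AND tests LONG_SIZE flag bytes at once. */
    if (page % LONG_SIZE == 0 &&
        (dirty.w[page / LONG_SIZE] & mask) == mask) {
        return LONG_SIZE;
    }
    /* Tier 3: test a single page's flag byte. */
    return (dirty.b[page] & flags) == flags;
}

int main(void)
{
    size_t i, n;

    memset(dirty.b, 0xff, NPAGES);        /* all pages start dirty...   */
    for (i = 0; i < NPAGES / LONG_BITS; i++) {
        dirty_by_word[i] = (int)LONG_BITS;
    }
    dirty.b[200] = 0;                     /* ...except page 200         */
    dirty_by_word[200 / LONG_BITS]--;

    for (i = 0; i < NPAGES; i += n) {
        n = dirty_run(i, 0x08);           /* 0x08: MIGRATION_DIRTY_FLAG */
        printf("page %3zu: %s, advancing %zu\n",
               i, n ? "dirty" : "clean", n ? n : (size_t)1);
        if (n == 0) {
            n = 1;                        /* step past the clean page   */
        }
    }
    return 0;
}

The trade-off behind this layout is that every dirty-bit update must now keep the per-word counter in sync with the byte map, which is why the open-coded accesses to phys_ram_dirty[] throughout exec.c are funnelled through the accessor functions in the rest of the patch.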