@@ -925,6 +925,7 @@ typedef struct RAMBlock {
uint32_t flags;
char idstr[256];
QLIST_ENTRY(RAMBlock) next;
+ QLIST_ENTRY(RAMBlock) next_mru;
#if defined(__linux__) && !defined(TARGET_S390X)
int fd;
#endif
@@ -933,6 +934,7 @@ typedef struct RAMBlock {
typedef struct RAMList {
uint8_t *phys_dirty;
QLIST_HEAD(ram, RAMBlock) blocks;
+ QLIST_HEAD(, RAMBlock) blocks_mru;
} RAMList;
extern RAMList ram_list;
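
The two RAMBlock/RAMList hunks above are the core of the change: each RAMBlock now carries a second set of list links, so the same block can sit on two intrusive lists at once, with ram_list.blocks kept in allocation order and ram_list.blocks_mru kept in most-recently-used order. Below is a minimal sketch of that pattern, assuming the QLIST macros from qemu-queue.h (which mirror the BSD <sys/queue.h> LIST API); the Block type and the block_new() helper are illustrative only, not part of the patch.

    /* Sketch only: one element is linked into two intrusive lists through two
     * separate QLIST_ENTRY fields.  Assumes qemu-queue.h; Block and
     * block_new() are hypothetical. */
    #include "qemu-queue.h"

    typedef struct Block {
        int id;
        QLIST_ENTRY(Block) next;      /* links for the allocation-order list */
        QLIST_ENTRY(Block) next_mru;  /* links for the MRU-order list */
    } Block;

    static QLIST_HEAD(, Block) blocks     = QLIST_HEAD_INITIALIZER(blocks);
    static QLIST_HEAD(, Block) blocks_mru = QLIST_HEAD_INITIALIZER(blocks_mru);

    static void block_new(Block *b)
    {
        /* Every block joins both lists; only the ordering differs later. */
        QLIST_INSERT_HEAD(&blocks, b, next);
        QLIST_INSERT_HEAD(&blocks_mru, b, next_mru);
    }
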
@@ -113,7 +113,11 @@ static uint8_t *code_gen_ptr;
int phys_ram_fd;
static int in_migration;
-RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
+RAMList ram_list = {
+ .blocks = QLIST_HEAD_INITIALIZER(ram_list),
+ .blocks_mru = QLIST_HEAD_INITIALIZER(ram_list.blocks_mru)
+};
+
#endif
CPUState *first_cpu;
@@ -2973,6 +2977,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
new_block->length = size;
QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+ QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
@@ -2997,6 +3002,7 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QLIST_REMOVE(block, next);
+ QLIST_REMOVE(block, next_mru);
qemu_free(block);
return;
}
@@ -3010,6 +3016,7 @@ void qemu_ram_free(ram_addr_t addr)
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QLIST_REMOVE(block, next);
+ QLIST_REMOVE(block, next_mru);
if (block->flags & RAM_PREALLOC_MASK) {
;
} else if (mem_path) {
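
The hunks in qemu_ram_alloc_from_ptr(), qemu_ram_free_from_ptr() and qemu_ram_free() keep the two lists in step: every insertion into ram_list.blocks is paired with an insertion into ram_list.blocks_mru, and both free paths unlink the block from both lists. The intended invariant is that the lists always hold the same set of blocks, differing only in order; a hypothetical sanity check (not part of the patch, assumes <assert.h>) would look like this:

    /* Hypothetical debugging aid: both lists should always contain the same
     * blocks, only their order differs. */
    static void ram_list_check_consistency(void)
    {
        RAMBlock *block;
        int n = 0, n_mru = 0;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            n++;
        }
        QLIST_FOREACH(block, &ram_list.blocks_mru, next_mru) {
            n_mru++;
        }
        assert(n == n_mru);
    }
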
@@ -3113,12 +3120,12 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks_mru, next_mru) {
if (addr - block->offset < block->length) {
/* Move this entry to to start of the list. */
if (block != QLIST_FIRST(&ram_list.blocks)) {
- QLIST_REMOVE(block, next);
- QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+ QLIST_REMOVE(block, next_mru);
+ QLIST_INSERT_HEAD(&ram_list.blocks_mru, block, next_mru);
}
if (xen_mapcache_enabled()) {
/* We need to check if the requested address is in the RAM
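
The qemu_get_ram_ptr() hunk is where the MRU ordering pays off: the lookup walks blocks_mru and moves the matching block to the front, so repeated accesses to the same block hit on the first iteration. Stripped of the Xen/mapcache handling, the pattern is a plain move-to-front search; note that in this sketch the head check is done against the MRU list itself, and the helper name is illustrative only.

    /* Sketch of the move-to-front lookup, simplified from the hunk above
     * (error handling and Xen mapcache support omitted). */
    static RAMBlock *ram_block_lookup_mru(ram_addr_t addr)
    {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks_mru, next_mru) {
            if (addr - block->offset < block->length) {
                if (block != QLIST_FIRST(&ram_list.blocks_mru)) {
                    /* Promote the hit so the next lookup finds it first. */
                    QLIST_REMOVE(block, next_mru);
                    QLIST_INSERT_HEAD(&ram_list.blocks_mru, block, next_mru);
                }
                return block;
            }
        }
        return NULL;
    }
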
@@ -3211,7 +3218,7 @@ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
return 0;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks_mru, next_mru) {
/* This case append when the block is not mapped. */
if (block->host == NULL) {
continue;
This patch creates a new list of RAM blocks in MRU order, so that separate
locking rules can be applied to the regular RAM block list and the MRU list.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 cpu-all.h |  2 ++
 exec.c    | 17 ++++++++++++-----
 2 files changed, 14 insertions(+), 5 deletions(-)
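
The motivation for splitting the lists is locking: walkers that never reorder anything, such as migration code iterating over all RAM blocks, can keep using ram_list.blocks under its existing rules, while the move-to-front updates are confined to blocks_mru and can later be put under their own lock. A rough sketch of that direction follows, using a hypothetical QemuMutex (from qemu-thread.h) that this patch does not introduce:

    /* Hypothetical follow-up, not part of this patch: confine MRU reordering
     * to its own mutex so readers of the allocation-order list are
     * unaffected by lookups. */
    static QemuMutex mru_lock;

    static void ram_block_promote_mru(RAMBlock *block)
    {
        qemu_mutex_lock(&mru_lock);
        if (block != QLIST_FIRST(&ram_list.blocks_mru)) {
            QLIST_REMOVE(block, next_mru);
            QLIST_INSERT_HEAD(&ram_list.blocks_mru, block, next_mru);
        }
        qemu_mutex_unlock(&mru_lock);
    }
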