@@ -129,6 +129,7 @@ int parse_debug_env(const char *name, int max, int initial);
const char *qemu_ether_ntoa(const MACAddr *mac);
void page_size_init(void);
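+/* Compute the run-time L1 page-table geometry and allocate l1_map. */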
+void init_l1_page_table_param(void);
/* returns non-zero if dump is in progress, otherwise zero is
* returned. */
@@ -57,6 +57,7 @@
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"
+#include "qemu/error-report.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
@@ -99,25 +100,18 @@ typedef struct PageDesc {
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
-/* The bits remaining after N lower levels of page tables. */
-#define V_L1_BITS_REM \
- ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
-
-#if V_L1_BITS_REM < 4
-#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
-#else
-#define V_L1_BITS V_L1_BITS_REM
-#endif
-
-#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
-
-#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
-
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
+/*
+ * L1 mapping properties, computed at run time by init_l1_page_table_param().
+ */
+static unsigned long v_l1_bits;
+static unsigned long v_l1_size;
+static unsigned long v_l1_shift;
+
/* The bottom level has pointers to PageDesc */
-static void *l1_map[V_L1_SIZE];
+static void **l1_map;
/* code generation context */
TCGContext tcg_ctx;
@@ -127,6 +121,25 @@ TCGContext tcg_ctx;
__thread int have_tb_lock;
#endif
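+/*
+ * Compute the L1 page-table geometry at run time and allocate the
+ * first-level map; these values replace the former compile-time
+ * V_L1_BITS, V_L1_SIZE and V_L1_SHIFT constants.
+ */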
+void init_l1_page_table_param(void)
+{
+ uint32_t v_l1_bits_rem;
+
+ assert(TARGET_PAGE_BITS);
+ /* The bits remaining after N lower levels of page tables. */
+ v_l1_bits_rem = ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS);
+    if (v_l1_bits_rem < 4) {
+        v_l1_bits = v_l1_bits_rem + V_L2_BITS;
+    } else {
+        v_l1_bits = v_l1_bits_rem;
+    }
+
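+    /* Number of first-level entries and the shift used to index them. */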
+ v_l1_size = ((target_ulong)1 << v_l1_bits);
+ v_l1_shift = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits);
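+    /* The first level is always allocated; lower levels are created on demand. */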
+ l1_map = g_malloc0(v_l1_size * sizeof(void *));
+    if (!l1_map) {
+        error_report("allocation failed for L1 map table");
+    }
+}
+
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
@@ -408,10 +421,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
int i;
/* Level 1. Always allocated. */
- lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
+ lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
/* Level 2..N-1. */
- for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
+ for (i = v_l1_shift / V_L2_BITS - 1; i > 0; i--) {
void **p = atomic_rcu_read(lp);
if (p == NULL) {
@@ -819,8 +832,8 @@ static void page_flush_tb(void)
{
int i;
- for (i = 0; i < V_L1_SIZE; i++) {
- page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
+ for (i = 0; i < v_l1_size; i++) {
+ page_flush_tb_1(v_l1_shift / V_L2_BITS - 1, l1_map + i);
}
}
@@ -1825,9 +1838,9 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
data.start = -1u;
data.prot = 0;
- for (i = 0; i < V_L1_SIZE; i++) {
- int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
- V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
+ for (i = 0; i < v_l1_size; i++) {
+ int rc = walk_memory_regions_1(&data, (target_ulong)i << (v_l1_shift + TARGET_PAGE_BITS),
+ v_l1_shift / V_L2_BITS - 1, l1_map + i);
if (rc != 0) {
return rc;
}
@@ -4044,6 +4044,9 @@ int main(int argc, char **argv, char **envp)
}
object_property_add_child(object_get_root(), "machine",
OBJECT(current_machine), &error_abort);
+
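+    /* Set up the L1 page-table geometry before anything uses l1_map. */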
+ init_l1_page_table_param();
+
cpu_exec_init_all();
if (machine_class->hw_version) {