--- a/include/sysemu/tcg.h
+++ b/include/sysemu/tcg.h
@@ -8,7 +8,7 @@
#ifndef SYSEMU_TCG_H
#define SYSEMU_TCG_H
-void tcg_exec_init(unsigned long tb_size);
+void tcg_exec_init(unsigned long tb_size, bool mirror_rwx);
#ifdef CONFIG_TCG
extern bool tcg_allowed;
#define tcg_enabled() (tcg_allowed)
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -39,6 +39,7 @@ struct TCGState {
bool mttcg_enabled;
unsigned long tb_size;
+ bool mirror_jit;
};
typedef struct TCGState TCGState;
@@ -94,6 +95,11 @@ static void tcg_accel_instance_init(Object *obj)
TCGState *s = TCG_STATE(obj);
s->mttcg_enabled = default_mttcg_enabled();
+#if defined(CONFIG_MIRROR_JIT) && defined(CONFIG_DEBUG_TCG)
+ s->mirror_jit = true;
+#else
+ s->mirror_jit = false;
+#endif
}
bool mttcg_enabled;
@@ -102,7 +108,7 @@ static int tcg_init(MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
- tcg_exec_init(s->tb_size * 1024 * 1024);
+ tcg_exec_init(s->tb_size * 1024 * 1024, s->mirror_jit);
mttcg_enabled = s->mttcg_enabled;
cpus_register_accel(&tcg_cpus);
@@ -168,6 +174,22 @@ static void tcg_set_tb_size(Object *obj, Visitor *v,
s->tb_size = value;
}
+#ifdef CONFIG_MIRROR_JIT
+static bool tcg_get_mirror_jit(Object *obj, Error **errp)
+{
+ TCGState *s = TCG_STATE(obj);
+
+ return s->mirror_jit;
+}
+
+static void tcg_set_mirror_jit(Object *obj, bool value, Error **errp)
+{
+ TCGState *s = TCG_STATE(obj);
+
+ s->mirror_jit = value;
+}
+#endif
+
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
@@ -185,6 +207,13 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "tb-size",
"TCG translation block cache size");
+#ifdef CONFIG_MIRROR_JIT
+ object_class_property_add_bool(oc, "mirror-jit",
+ tcg_get_mirror_jit, tcg_set_mirror_jit);
+ object_class_property_set_description(oc, "mirror-jit",
+ "JIT pages mapped into separate RW and RX regions");
+#endif
+
}
static const TypeInfo tcg_accel_type = {
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1027,12 +1027,15 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool mirror_jit)
{
void *buf = static_code_gen_buffer;
void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
size_t size;
+ /* not applicable */
+ assert(!mirror_jit);
+
/* page-align the beginning and end of the buffer */
buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
@@ -1061,14 +1064,15 @@ static inline void *alloc_code_gen_buffer(void)
return buf;
}
#elif defined(_WIN32)
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool mirror_jit)
{
size_t size = tcg_ctx->code_gen_buffer_size;
+ assert(!mirror_jit); /* not applicable */
return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
}
#else
-static inline void *alloc_code_gen_buffer(void)
+static inline void *alloc_code_gen_buffer(bool mirror_jit)
{
int prot = PROT_READ | PROT_EXEC;
int flags = 0;
@@ -1078,16 +1082,22 @@ static inline void *alloc_code_gen_buffer(void)
#if defined(CONFIG_MIRROR_JIT)
#if defined(CONFIG_LINUX)
- fd = qemu_memfd_create("tcg-jit", size, false, 0, 0, NULL);
- if (fd < 0) {
- return NULL;
+ if (mirror_jit) {
+ fd = qemu_memfd_create("tcg-jit", size, false, 0, 0, NULL);
+ if (fd < 0) {
+ return NULL;
+ }
+ tcg_ctx->code_gen_buffer_fd = fd;
+ flags |= MAP_SHARED;
+ } else {
+ prot |= PROT_WRITE;
+ flags |= MAP_ANONYMOUS | MAP_PRIVATE;
}
- tcg_ctx->code_gen_buffer_fd = fd;
- flags |= MAP_SHARED;
#else /* defined(CONFIG_LINUX) */
#error "Mirror JIT unimplemented for this platform."
#endif /* defined(CONFIG_LINUX) */
#else /* defined(CONFIG_MIRROR_JIT) */
+ assert(!mirror_jit);
prot |= PROT_WRITE;
flags |= MAP_ANONYMOUS | MAP_PRIVATE;
#endif /* defined(CONFIG_MIRROR_JIT) */
@@ -1162,7 +1172,7 @@ static inline void *alloc_jit_rw_mirror(void)
static inline void code_gen_alloc(size_t tb_size, bool mirror_jit)
{
tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
- tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
+ tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(mirror_jit);
if (tcg_ctx->code_gen_buffer == NULL) {
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
exit(1);
@@ -1170,13 +1180,20 @@ static inline void code_gen_alloc(size_t tb_size, bool mirror_jit)
#if defined(CONFIG_MIRROR_JIT)
void *mirror;
- /* For platforms that need a mirror mapping for code execution */
- mirror = alloc_jit_rw_mirror();
- if (mirror == NULL) {
- fprintf(stderr, "Could not remap code buffer mirror\n");
- exit(1);
+ if (mirror_jit) {
+ /* For platforms that need a mirror mapping for code execution */
+ mirror = alloc_jit_rw_mirror();
+ if (mirror == NULL) {
+ fprintf(stderr, "Could not remap code buffer mirror\n");
+ exit(1);
+ }
+ } else {
+ /* Mirror mapping disabled: the code buffer itself is mapped RWX */
+ mirror = tcg_ctx->code_gen_buffer;
}
tcg_ctx->code_rw_mirror_diff = mirror - tcg_ctx->code_gen_buffer;
+#else
+ assert(!mirror_jit);
#endif /* CONFIG_MIRROR_JIT */
}
@@ -1201,16 +1218,18 @@ static void tb_htable_init(void)
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
-/* Must be called before using the QEMU cpus. 'tb_size' is the size
- (in bytes) allocated to the translation buffer. Zero means default
- size. */
-void tcg_exec_init(unsigned long tb_size)
+/*
+ * Must be called before using the QEMU cpus. 'tb_size' is the size
+ * (in bytes) allocated to the translation buffer. Zero means default
+ * size. 'mirror_jit' maps the buffer as separate RW and RX regions.
+ */
+void tcg_exec_init(unsigned long tb_size, bool mirror_jit)
{
tcg_allowed = true;
cpu_gen_init();
page_init();
tb_htable_init();
- code_gen_alloc(tb_size);
+ code_gen_alloc(tb_size, mirror_jit);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -910,7 +910,7 @@ int main(int argc, char **argv)
}
/* init tcg before creating CPUs and to get qemu_host_page_size */
- tcg_exec_init(0);
+ tcg_exec_init(0, false);
cpu_type = parse_cpu_option(cpu_model);
cpu = cpu_create(cpu_type);
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -705,7 +705,7 @@ int main(int argc, char **argv, char **envp)
cpu_type = parse_cpu_option(cpu_model);
/* init tcg before creating CPUs and to get qemu_host_page_size */
- tcg_exec_init(0);
+ tcg_exec_init(0, false);
cpu = cpu_create(cpu_type);
env = cpu->env_ptr;
@@ -123,6 +123,9 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
" igd-passthru=on|off (enable Xen integrated Intel graphics passthrough, default=off)\n"
" kernel-irqchip=on|off|split controls accelerated irqchip support (default=on)\n"
" kvm-shadow-mem=size of KVM shadow MMU in bytes\n"
+#ifdef CONFIG_MIRROR_JIT
+ " mirror-jit=on|off (JIT pages mapped into separate RW and RX regions, default=off)\n"
+#endif
" tb-size=n (TCG translation block cache size)\n"
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
SRST
@@ -148,6 +151,14 @@ SRST
``kvm-shadow-mem=size``
Defines the size of the KVM shadow MMU.
+#ifdef CONFIG_MIRROR_JIT
+ ``mirror-jit=on|off``
+ Useful for debugging TCG or running on a strict W^X platform. When
+ enabled, TB code is written through a mirror-mapped RW address that is
+ separate from the RX address being executed. (default=off, or on when
+ QEMU is built with --enable-debug-tcg)
+
+#endif
``tb-size=n``
Controls the size (in MiB) of the TCG translation block cache.
On platforms that support mirror mapping, give the user the option to
disable it. There is a performance penalty for mirror JIT and on some
platforms, system security must be downgraded to use QEMU with mirror
JIT turned off. To aid in testing, we still enable mirror JIT by default
when configured with --enable-debug-tcg.

Signed-off-by: Joelle van Dyne <j@getutm.app>
---
 include/sysemu/tcg.h      |  2 +-
 accel/tcg/tcg-all.c       | 31 ++++++++++++++++++++-
 accel/tcg/translate-all.c | 57 ++++++++++++++++++++++++++-------------
 bsd-user/main.c           |  2 +-
 linux-user/main.c         |  2 +-
 qemu-options.hx           | 11 ++++++++
 6 files changed, 82 insertions(+), 23 deletions(-)
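
For reviewers unfamiliar with the mechanism this option toggles, below is a
minimal standalone sketch of the dual-mapping idea behind the
CONFIG_MIRROR_JIT code above. This is not QEMU code: it assumes Linux 3.17+
(for memfd_create), an x86-64 host, and trims error reporting; the file name
and strings are hypothetical. One set of pages is mapped twice, read-write
for the translator and read-execute for the CPU.

    /* mirror-demo.c (hypothetical): cc -o mirror-demo mirror-demo.c */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        size_t size = 4096;

        /* Anonymous in-memory file; both mappings below share its pages. */
        int fd = syscall(SYS_memfd_create, "jit-demo", 0);
        if (fd < 0 || ftruncate(fd, size) < 0) {
            return 1;
        }

        /* Two views of the same pages: RX for execution, RW for code emission. */
        void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
        void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (rx == MAP_FAILED || rw == MAP_FAILED) {
            return 1;
        }

        /* x86-64 only: a single 'ret' written through the RW view... */
        ((unsigned char *)rw)[0] = 0xc3;

        /* ...is visible and executable through the RX view. */
        ((void (*)(void))rx)();

        printf("RW mirror sits %td bytes from the RX mapping\n",
               (char *)rw - (char *)rx);
        return 0;
    }

Hosts with split instruction/data caches would also need an icache flush
before executing, which is why the sketch is restricted to x86-64. With the
patch applied, the property should be selectable per accelerator, e.g.
something like '-accel tcg,mirror-jit=off' to trade the W^X split for a
single RWX mapping and avoid the mirror-JIT overhead.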