@@ -269,6 +269,7 @@ S: Supported
F: docs/misc/livepatch.markdown
F: tools/misc/xen-livepatch.c
F: xen/arch/*/livepatch*
+F: xen/arch/*/*/livepatch*
F: xen/common/livepatch*
F: xen/include/asm-*/livepatch.h
F: xen/include/xen/livepatch*
@@ -1108,7 +1108,7 @@ and the .data or .bss sections are of zero length.
The hypervisor should verify that the in-place patching would fit within
the code or data.
-### Trampoline (e9 opcode)
+### Trampoline (e9 opcode), x86
The e9 opcode used for jmpq uses a 32-bit signed displacement. That means
we are limited to up to 2GB of virtual address to place the new code
@@ -1143,3 +1143,15 @@ that in the hypervisor is advised.
The tool for generating payloads currently does perform a compile-time
check to ensure that the function to be replaced is large enough.
+### Trampoline, ARM
+
+The unconditional branch instruction (for the encoding see the
+DDI 0406C.c and DDI 0487A.j Architecture Reference Manuals) with the
+proper offset is used to branch unconditionally to the new code.
+This means that `old_size` **MUST** be at least four bytes when patching
+in a trampoline.
+
+The new code is placed in the 8M - 10M virtual address space while the
+Xen code is in 2M - 4M. That gives us enough space and keeps the branch
+displacement well within range.
+
+The hypervisor also checks the displacement during loading of the payload.
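
As an illustration only (not part of the patch), the sketch below shows how an
AArch64 unconditional branch (`B`) to the new code could be encoded and its
displacement validated; the helper name is hypothetical, and the patch itself
relies on `aarch64_insn_gen_branch_imm()` for this.

```c
/*
 * Illustrative sketch: encode `B <to>` at address 'from'.  The B
 * instruction carries a signed 26-bit word offset, so the byte
 * displacement must be 4-byte aligned and within +/- 128MB.
 */
#include <stdint.h>

int encode_aarch64_branch(uint64_t from, uint64_t to, uint32_t *insn)
{
    int64_t disp = (int64_t)(to - from);

    if ( disp & 0x3 )                                 /* not word aligned */
        return -1;
    if ( disp < -(1LL << 27) || disp >= (1LL << 27) ) /* outside +/- 128MB */
        return -1;

    /* B: opcode 0b000101 in bits [31:26], imm26 = disp / 4 in bits [25:0]. */
    *insn = (0x5u << 26) | (((uint64_t)disp >> 2) & 0x03ffffffu);
    return 0;
}
```

With the Xen text at 2M - 4M and the livepatch vmap at 8M - 10M, the
displacement is only a few megabytes, comfortably inside this limit.
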
@@ -59,6 +59,15 @@ ALL_OBJS := $(TARGET_SUBARCH)/head.o $(ALL_OBJS)
DEPS += $(TARGET_SUBARCH)/.head.o.d
+ifdef CONFIG_LIVEPATCH
+all_symbols = --all-symbols
+ifdef CONFIG_FAST_SYMBOL_LOOKUP
+all_symbols = --all-symbols --sort-by-name
+endif
+else
+all_symbols =
+endif
+
$(TARGET): $(TARGET)-syms $(TARGET).axf
$(OBJCOPY) -O binary -S $< $@
ifeq ($(CONFIG_ARM_64),y)
@@ -94,12 +103,12 @@ $(TARGET)-syms: prelink.o xen.lds $(BASEDIR)/common/symbols-dummy.o
$(LD) $(LDFLAGS) -T xen.lds -N prelink.o \
$(BASEDIR)/common/symbols-dummy.o -o $(@D)/.$(@F).0
$(NM) -pa --format=sysv $(@D)/.$(@F).0 \
- | $(BASEDIR)/tools/symbols --sysv --sort >$(@D)/.$(@F).0.S
+ | $(BASEDIR)/tools/symbols $(all_symbols) --sysv --sort >$(@D)/.$(@F).0.S
$(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).0.o
$(LD) $(LDFLAGS) -T xen.lds -N prelink.o \
$(@D)/.$(@F).0.o -o $(@D)/.$(@F).1
$(NM) -pa --format=sysv $(@D)/.$(@F).1 \
- | $(BASEDIR)/tools/symbols --sysv --sort >$(@D)/.$(@F).1.S
+ | $(BASEDIR)/tools/symbols $(all_symbols) --sysv --sort >$(@D)/.$(@F).1.S
$(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).1.o
$(LD) $(LDFLAGS) -T xen.lds -N prelink.o $(build_id_linker) \
$(@D)/.$(@F).1.o -o $@
@@ -4,6 +4,7 @@ obj-$(EARLY_PRINTK) += debug.o
obj-y += domctl.o
obj-y += domain.o
obj-y += entry.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-y += proc-v7.o proc-caxx.o
obj-y += smpboot.o
obj-y += traps.o
new file mode 100644
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/livepatch_elf.h>
+#include <xen/livepatch.h>
+
+void arch_livepatch_apply(struct livepatch_func *func)
+{
+}
+
+void arch_livepatch_revert(const struct livepatch_func *func)
+{
+}
+
+int arch_livepatch_verify_elf(const struct livepatch_elf *elf)
+{
+ return -EOPNOTSUPP;
+}
+
+int arch_livepatch_perform_rela(struct livepatch_elf *elf,
+ const struct livepatch_elf_sec *base,
+ const struct livepatch_elf_sec *rela)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -6,6 +6,7 @@ obj-y += domctl.o
obj-y += domain.o
obj-y += entry.o
obj-y += insn.o
+obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-y += smpboot.o
obj-y += traps.o
obj-y += vfp.o
new file mode 100644
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <xen/bitops.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/livepatch_elf.h>
+#include <xen/livepatch.h>
+#include <xen/mm.h>
+#include <xen/vmap.h>
+
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/insn.h>
+#include <asm/livepatch.h>
+
+void arch_livepatch_apply(struct livepatch_func *func)
+{
+ uint32_t insn;
+ uint32_t *new_ptr;
+ unsigned int i, len;
+
+ BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE > sizeof(func->opaque));
+ BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE != sizeof(insn));
+
+ ASSERT(vmap_of_xen_text);
+
+ len = livepatch_insn_len(func);
+ if ( !len )
+ return;
+
+ /* Save old ones. */
+ memcpy(func->opaque, func->old_addr, len);
+
+ if ( func->new_addr )
+ insn = aarch64_insn_gen_branch_imm((unsigned long)func->old_addr,
+ (unsigned long)func->new_addr,
+ AARCH64_INSN_BRANCH_NOLINK);
+ else
+ insn = aarch64_insn_gen_nop();
+
+ ASSERT(insn != AARCH64_BREAK_FAULT);
+
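+    /* Compute the patch address inside the writable alias of the Xen text. */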
+ new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+ len = len / sizeof(uint32_t);
+
+ /* PATCH! */
+ for ( i = 0; i < len; i++ )
+ *(new_ptr + i) = insn;
+
+ /*
+ * When we upload the payload, it will go through the data cache
+ * (the region is cacheable). Until the data cache is cleaned, the data
+     * may not reach memory. And where the data and instruction caches are
+     * separate, we may fetch stale instructions from memory because the
+     * data cache has not yet been synced with it. Hence sync it.
+ */
+ if ( func->new_addr )
+ clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size);
+ clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len);
+}
+
+void arch_livepatch_revert(const struct livepatch_func *func)
+{
+ uint32_t *new_ptr;
+ unsigned int len;
+
+ new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+
+ len = livepatch_insn_len(func);
+ memcpy(new_ptr, func->opaque, len);
+
+ clean_and_invalidate_dcache_va_range(new_ptr, len);
+}
+
+int arch_livepatch_verify_elf(const struct livepatch_elf *elf)
+{
+ const Elf_Ehdr *hdr = elf->hdr;
+
+ if ( hdr->e_machine != EM_AARCH64 ||
+ hdr->e_ident[EI_CLASS] != ELFCLASS64 )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n",
+ elf->name);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+enum aarch64_reloc_op {
+ RELOC_OP_NONE,
+ RELOC_OP_ABS,
+ RELOC_OP_PREL,
+ RELOC_OP_PAGE,
+};
+
+static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
+{
+ switch ( reloc_op )
+ {
+ case RELOC_OP_ABS:
+ return val;
+
+ case RELOC_OP_PREL:
+ return val - (u64)place;
+
+ case RELOC_OP_PAGE:
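+        /* Page(S+A) - Page(P): 4K-page-relative delta, as used by ADRP. */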
+ return (val & ~0xfff) - ((u64)place & ~0xfff);
+
+ case RELOC_OP_NONE:
+ return 0;
+    }
+
+ dprintk(XENLOG_DEBUG, LIVEPATCH "do_reloc: unknown relocation operation %d\n", reloc_op);
+
+ return 0;
+}
+
+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+{
+ s64 sval = do_reloc(op, place, val);
+
+ switch ( len )
+ {
+ case 16:
+ *(s16 *)place = sval;
+ if ( sval < INT16_MIN || sval > UINT16_MAX )
+ return -EOVERFLOW;
+ break;
+
+ case 32:
+ *(s32 *)place = sval;
+ if ( sval < INT32_MIN || sval > UINT32_MAX )
+ return -EOVERFLOW;
+ break;
+
+ case 64:
+ *(s64 *)place = sval;
+ break;
+
+ default:
+ dprintk(XENLOG_DEBUG, LIVEPATCH "Invalid length (%d) for data relocation\n", len);
+ return 0;
+ }
+
+ return 0;
+}
+
+enum aarch64_insn_movw_imm_type {
+ AARCH64_INSN_IMM_MOVNZ,
+ AARCH64_INSN_IMM_MOVKZ,
+};
+
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *dest, u64 val,
+ int lsb, enum aarch64_insn_movw_imm_type imm_type)
+{
+ u64 imm;
+ s64 sval;
+ u32 insn = *(u32 *)dest;
+
+ sval = do_reloc(op, dest, val);
+ imm = sval >> lsb;
+
+ if ( imm_type == AARCH64_INSN_IMM_MOVNZ )
+ {
+ /*
+ * For signed MOVW relocations, we have to manipulate the
+ * instruction encoding depending on whether or not the
+ * immediate is less than zero.
+ */
+ insn &= ~(3 << 29);
+ if ( sval >= 0 )
+ {
+ /* >=0: Set the instruction to MOVZ (opcode 10b). */
+ insn |= 2 << 29;
+ }
+ else
+ {
+ /*
+ * <0: Set the instruction to MOVN (opcode 00b).
+ * Since we've masked the opcode already, we
+ * don't need to do anything other than
+ * inverting the new immediate field.
+ */
+ imm = ~imm;
+ }
+ }
+
+ /* Update the instruction with the new encoding. */
+ insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
+ *(u32 *)dest = insn;
+
+ if ( imm > UINT16_MAX )
+ return -EOVERFLOW;
+
+ return 0;
+}
+
+static int reloc_insn_imm(enum aarch64_reloc_op op, void *dest, u64 val,
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
+{
+ u64 imm, imm_mask;
+ s64 sval;
+ u32 insn = *(u32 *)dest;
+
+ /* Calculate the relocation value. */
+ sval = do_reloc(op, dest, val);
+ sval >>= lsb;
+
+ /* Extract the value bits and shift them to bit 0. */
+ imm_mask = (BIT(lsb + len) - 1) >> lsb;
+ imm = sval & imm_mask;
+
+ /* Update the instruction's immediate field. */
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)dest = insn;
+
+ /*
+ * Extract the upper value bits (including the sign bit) and
+ * shift them to bit 0.
+ */
+ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+ /*
+ * Overflow has occurred if the upper bits are not all equal to
+ * the sign bit of the value.
+ */
+ if ( (u64)(sval + 1) >= 2 )
+ return -EOVERFLOW;
+ return 0;
+}
+
+int arch_livepatch_perform_rela(struct livepatch_elf *elf,
+ const struct livepatch_elf_sec *base,
+ const struct livepatch_elf_sec *rela)
+{
+ const Elf_RelA *r;
+ unsigned int symndx, i;
+ uint64_t val;
+ void *dest;
+ bool_t overflow_check;
+
+ for ( i = 0; i < (rela->sec->sh_size / rela->sec->sh_entsize); i++ )
+ {
+ int ovf = 0;
+
+ r = rela->data + i * rela->sec->sh_entsize;
+
+ symndx = ELF64_R_SYM(r->r_info);
+
+ if ( symndx > elf->nsym )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation wants symbol@%u which is past end!\n",
+ elf->name, symndx);
+ return -EINVAL;
+ }
+
+ dest = base->load_addr + r->r_offset; /* P */
+ val = elf->sym[symndx].sym->st_value + r->r_addend; /* S+A */
+
+ overflow_check = true;
+
+ /* ARM64 operations at minimum are always 32-bit. */
+ if ( r->r_offset >= base->sec->sh_size ||
+ (r->r_offset + sizeof(uint32_t)) > base->sec->sh_size )
+ goto bad_offset;
+
+ switch ( ELF64_R_TYPE(r->r_info) )
+ {
+ /* Data */
+ case R_AARCH64_ABS64:
+ if ( r->r_offset + sizeof(uint64_t) > base->sec->sh_size )
+ goto bad_offset;
+ overflow_check = false;
+ ovf = reloc_data(RELOC_OP_ABS, dest, val, 64);
+ break;
+
+ case R_AARCH64_ABS32:
+ ovf = reloc_data(RELOC_OP_ABS, dest, val, 32);
+ break;
+
+ case R_AARCH64_ABS16:
+ ovf = reloc_data(RELOC_OP_ABS, dest, val, 16);
+ break;
+
+ case R_AARCH64_PREL64:
+ if ( r->r_offset + sizeof(uint64_t) > base->sec->sh_size )
+ goto bad_offset;
+ overflow_check = false;
+ ovf = reloc_data(RELOC_OP_PREL, dest, val, 64);
+ break;
+
+ case R_AARCH64_PREL32:
+ ovf = reloc_data(RELOC_OP_PREL, dest, val, 32);
+ break;
+
+ case R_AARCH64_PREL16:
+ ovf = reloc_data(RELOC_OP_PREL, dest, val, 16);
+ break;
+
+ /* MOVW instruction relocations. */
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ overflow_check = false;
+ /* Fallthrough. */
+
+ case R_AARCH64_MOVW_UABS_G0:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 0,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ overflow_check = false;
+ /* Fallthrough. */
+
+ case R_AARCH64_MOVW_UABS_G1:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 16,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ overflow_check = false;
+ /* Fallthrough. */
+
+ case R_AARCH64_MOVW_UABS_G2:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 32,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_UABS_G3:
+ /* We're using the top bits so we can't overflow. */
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 48,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_SABS_G0:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 0,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_SABS_G1:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 16,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_SABS_G2:
+ ovf = reloc_insn_movw(RELOC_OP_ABS, dest, val, 32,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G0_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 0,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G0:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 0,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G1_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 16,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G1:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 16,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G2_NC:
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 32,
+ AARCH64_INSN_IMM_MOVKZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G2:
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 32,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ case R_AARCH64_MOVW_PREL_G3:
+ /* We're using the top bits so we can't overflow. */
+ overflow_check = false;
+ ovf = reloc_insn_movw(RELOC_OP_PREL, dest, val, 48,
+ AARCH64_INSN_IMM_MOVNZ);
+ break;
+
+ /* Instructions. */
+ case R_AARCH64_ADR_PREL_LO21:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, dest, val, 0, 21,
+ AARCH64_INSN_IMM_ADR);
+ break;
+
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+            overflow_check = false;
+            /* Fallthrough. */
+
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ ovf = reloc_insn_imm(RELOC_OP_PAGE, dest, val, 12, 21,
+ AARCH64_INSN_IMM_ADR);
+ break;
+
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ /* Fallthrough. */
+
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, dest, val, 0, 12,
+ AARCH64_INSN_IMM_12);
+ break;
+
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, dest, val, 1, 11,
+ AARCH64_INSN_IMM_12);
+ break;
+
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, dest, val, 2, 10,
+ AARCH64_INSN_IMM_12);
+ break;
+
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, dest, val, 3, 9,
+ AARCH64_INSN_IMM_12);
+ break;
+
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ overflow_check = false;
+ ovf = reloc_insn_imm(RELOC_OP_ABS, dest, val, 4, 8,
+ AARCH64_INSN_IMM_12);
+ break;
+
+ case R_AARCH64_TSTBR14:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, dest, val, 2, 19,
+ AARCH64_INSN_IMM_14);
+ break;
+
+ case R_AARCH64_CONDBR19:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, dest, val, 2, 19,
+ AARCH64_INSN_IMM_19);
+ break;
+
+ case R_AARCH64_JUMP26:
+ case R_AARCH64_CALL26:
+ ovf = reloc_insn_imm(RELOC_OP_PREL, dest, val, 2, 26,
+ AARCH64_INSN_IMM_26);
+ break;
+
+ default:
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Unhandled relocation %lu\n",
+ elf->name, ELF64_R_TYPE(r->r_info));
+ return -EOPNOTSUPP;
+ }
+
+ if ( overflow_check && ovf == -EOVERFLOW )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n",
+ elf->name, i, rela->name, base->name);
+ return ovf;
+ }
+ }
+ return 0;
+
+ bad_offset:
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative relocation offset is past %s section!\n",
+ elf->name, base->name);
+ return -EINVAL;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
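
To make the relocation arithmetic above concrete, here is a minimal standalone
sketch (an illustration, not part of the patch; the helper name is
hypothetical) of the `S + A - P` computation and overflow check that
`reloc_data()` performs for an `R_AARCH64_PREL32` entry:

```c
#include <stdint.h>

/*
 * Sketch of a PREL32 data relocation: store S + A - P at 'place', where
 * S is the symbol value, A the addend and P the address being patched.
 * The result must fit the union of the signed and unsigned 32-bit ranges.
 */
int apply_prel32(void *place, uint64_t sym_value, int64_t addend)
{
    int64_t sval = (int64_t)(sym_value + addend) - (int64_t)(uintptr_t)place;

    if ( sval < INT32_MIN || sval > UINT32_MAX )
        return -1;                      /* -EOVERFLOW in the code above */

    *(int32_t *)place = (int32_t)sval;
    return 0;
}
```
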
@@ -13,6 +13,7 @@
#include <xen/hypercall.h>
#include <xen/init.h>
#include <xen/lib.h>
+#include <xen/livepatch.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/wait.h>
@@ -55,6 +56,11 @@ void idle_loop(void)
do_tasklet();
do_softirq();
+ /*
+         * This check MUST be last (or come before the dsb/wfi). Otherwise,
+         * after we get the softirq we would execute dsb/wfi (and sleep)
+         * without patching.
+ */
+ check_for_livepatch_work();
}
}
@@ -6,44 +6,82 @@
#include <xen/lib.h>
#include <xen/livepatch_elf.h>
#include <xen/livepatch.h>
+#include <xen/vmap.h>
+
+#include <asm/livepatch.h>
+#include <asm/mm.h>
+
+void *vmap_of_xen_text;
int arch_livepatch_quiesce(void)
{
- return -ENOSYS;
+ mfn_t text_mfn;
+ unsigned int text_order;
+
+ if ( vmap_of_xen_text )
+ return -EINVAL;
+
+ text_mfn = _mfn(virt_to_mfn(_start));
+ text_order = get_order_from_bytes(_end - _start);
+
+ /*
+ * The text section is read-only. So re-map Xen to be able to patch
+ * the code.
+ */
+ vmap_of_xen_text = __vmap(&text_mfn, 1U << text_order, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
+
+ if ( !vmap_of_xen_text )
+ {
+ printk(XENLOG_ERR LIVEPATCH "Failed to setup vmap of hypervisor! (order=%u)\n",
+ text_order);
+ return -ENOMEM;
+ }
+
+ return 0;
}
void arch_livepatch_revive(void)
{
+ /*
+ * Nuke the instruction cache. Data cache has been cleaned before in
+ * arch_livepatch_[apply|revert].
+ */
+ invalidate_icache();
+
+ if ( vmap_of_xen_text )
+ vunmap(vmap_of_xen_text);
+
+ vmap_of_xen_text = NULL;
}
int arch_livepatch_verify_func(const struct livepatch_func *func)
{
- return -ENOSYS;
-}
+    /* If NOPing, only allow what fits in ->opaque (whole instructions). */
+ if ( !func->new_addr && (func->new_size > sizeof(func->opaque) ||
+ func->new_size % ARCH_PATCH_INSN_SIZE) )
+ return -EOPNOTSUPP;
-void arch_livepatch_apply(struct livepatch_func *func)
-{
-}
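+    /* The trampoline branch needs at least one instruction of space. */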
+ if ( func->old_size < ARCH_PATCH_INSN_SIZE )
+ return -EINVAL;
-void arch_livepatch_revert(const struct livepatch_func *func)
-{
+ return 0;
}
void arch_livepatch_post_action(void)
{
+ /* arch_livepatch_revive has nuked the instruction cache. */
}
void arch_livepatch_mask(void)
{
+ /* Mask System Error (SError) */
+ local_abort_disable();
}
void arch_livepatch_unmask(void)
{
-}
-
-int arch_livepatch_verify_elf(const struct livepatch_elf *elf)
-{
- return -ENOSYS;
+ local_abort_enable();
}
int arch_livepatch_perform_rel(struct livepatch_elf *elf,
@@ -53,20 +91,43 @@ int arch_livepatch_perform_rel(struct livepatch_elf *elf,
return -ENOSYS;
}
-int arch_livepatch_perform_rela(struct livepatch_elf *elf,
- const struct livepatch_elf_sec *base,
- const struct livepatch_elf_sec *rela)
-{
- return -ENOSYS;
-}
-
int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type)
{
- return -ENOSYS;
+ unsigned long start = (unsigned long)va;
+ unsigned int flags = 0;
+
+ ASSERT(va);
+ ASSERT(pages);
+
+ switch ( type )
+ {
+ case LIVEPATCH_VA_RX:
+ flags = PTE_RO; /* R set, NX clear */
+ break;
+
+ case LIVEPATCH_VA_RW:
+ flags = PTE_NX; /* R clear, NX set */
+ break;
+
+ case LIVEPATCH_VA_RO:
+ flags = PTE_NX | PTE_RO; /* R set, NX set */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return modify_xen_mappings(start, start + pages * PAGE_SIZE, flags);
}
void __init arch_livepatch_init(void)
{
+ void *start, *end;
+
+ start = (void *)LIVEPATCH_VMAP_START;
+ end = (void *)LIVEPATCH_VMAP_END;
+
+ vm_init_type(VMAP_XEN, start, end);
}
/*
@@ -24,6 +24,7 @@
#include <xen/symbols.h>
#include <xen/irq.h>
#include <xen/lib.h>
+#include <xen/livepatch.h>
#include <xen/mm.h>
#include <xen/errno.h>
#include <xen/hypercall.h>
@@ -2689,6 +2690,11 @@ asmlinkage void leave_hypervisor_tail(void)
}
local_irq_enable();
do_softirq();
+ /*
+     * Must be the last one, as the IPI will trigger us to come here
+     * and we want to patch the hypervisor with almost no stack in use.
+ */
+ check_for_livepatch_work();
}
}
@@ -217,7 +217,7 @@ config CRYPTO
config LIVEPATCH
bool "Live patching support (TECH PREVIEW)"
default n
- depends on X86 && HAS_BUILD_ID = "y"
+ depends on !ARM_32 && HAS_BUILD_ID = "y"
---help---
Allows a running Xen hypervisor to be dynamically patched using
binary patches without rebooting. This is primarily used to binarily
@@ -80,6 +80,7 @@
* 4M - 6M Fixmap: special-purpose 4K mapping slots
* 6M - 8M Early boot mapping of FDT
* 8M - 10M Early relocation address (used when relocating Xen)
+ * and later for livepatch vmap (if compiled in)
*
* ARM32 layout:
* 0 - 10M <COMMON>
@@ -113,6 +114,10 @@
#define FIXMAP_ADDR(n) (_AT(vaddr_t,0x00400000) + (n) * PAGE_SIZE)
#define BOOT_FDT_VIRT_START _AT(vaddr_t,0x00600000)
#define BOOT_RELOC_VIRT_START _AT(vaddr_t,0x00800000)
+#ifdef CONFIG_LIVEPATCH
+#define LIVEPATCH_VMAP_START _AT(vaddr_t,0x00800000)
+#define LIVEPATCH_VMAP_END (LIVEPATCH_VMAP_START + MB(2))
+#endif
#define HYPERVISOR_VIRT_START XEN_VIRT_START
new file mode 100644
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+#ifndef __XEN_ARM_LIVEPATCH_H__
+#define __XEN_ARM_LIVEPATCH_H__
+
+/* On both ARM32 and ARM64, instructions are always 4 bytes long. */
+#define ARCH_PATCH_INSN_SIZE 4
+
+/*
+ * The virtual address of the hypervisor .text region. We need this as
+ * the normal virtual addresses are write-protected.
+ */
+extern void *vmap_of_xen_text;
+
+#endif /* __XEN_ARM_LIVEPATCH_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -177,6 +177,7 @@ typedef struct {
#define EM_IA_64 50 /* Intel Merced */
#define EM_X86_64 62 /* AMD x86-64 architecture */
#define EM_VAX 75 /* DEC VAX */
+#define EM_AARCH64 183 /* ARM 64-bit */
/* Version */
#define EV_NONE 0 /* Invalid */
@@ -353,12 +354,66 @@ typedef struct {
#define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF)
#define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t))
-/* x86-64 relocation types. We list only the ones Live Patch implements. */
+/*
+ * Relocation types for x86_64 and ARM 64. We list only the ones Live Patch
+ * implements.
+ */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+/*
+ * S - address of symbol.
+ * A - addend for relocation (r_addend).
+ * P - address of the destination being relocated (derived from r_offset).
+ * NC - No check for overflow.
+ *
+ * The defines use _PREL for PC-relative addresses, and _NC for No Check.
+ */
+#define R_AARCH64_ABS64 257 /* Direct 64 bit. S+A, NC*/
+#define R_AARCH64_ABS32 258 /* Direct 32 bit. S+A */
+#define R_AARCH64_ABS16 259 /* Direct 16 bit, S+A */
+#define R_AARCH64_PREL64 260 /* S+A-P, NC */
+#define R_AARCH64_PREL32 261 /* S+A-P */
+#define R_AARCH64_PREL16 262 /* S+A-P */
+
+/* Instructions. */
+#define R_AARCH64_MOVW_UABS_G0 263
+#define R_AARCH64_MOVW_UABS_G0_NC 264
+#define R_AARCH64_MOVW_UABS_G1 265
+#define R_AARCH64_MOVW_UABS_G1_NC 266
+#define R_AARCH64_MOVW_UABS_G2 267
+#define R_AARCH64_MOVW_UABS_G2_NC 268
+#define R_AARCH64_MOVW_UABS_G3 269
+
+#define R_AARCH64_MOVW_SABS_G0 270
+#define R_AARCH64_MOVW_SABS_G1 271
+#define R_AARCH64_MOVW_SABS_G2 272
+
+#define R_AARCH64_ADR_PREL_LO21 274 /* ADR imm, [20:0]. S+A-P */
+#define R_AARCH64_ADR_PREL_PG_HI21 275 /* ADRP imm, [32:12]. Page(S+A) - Page(P).*/
+#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
+#define R_AARCH64_ADD_ABS_LO12_NC 277 /* ADD imm. [11:0]. S+A, NC */
+
+#define R_AARCH64_TSTBR14 279
+#define R_AARCH64_CONDBR19 280 /* Bits 20:2, S+A-P */
+#define R_AARCH64_JUMP26 282 /* Bits 27:2, S+A-P */
+#define R_AARCH64_CALL26 283 /* Bits 27:2, S+A-P */
+#define R_AARCH64_LDST16_ABS_LO12_NC 284 /* LD/ST to bits 11:1, S+A, NC */
+#define R_AARCH64_LDST32_ABS_LO12_NC 285 /* LD/ST to bits 11:2, S+A, NC */
+#define R_AARCH64_LDST64_ABS_LO12_NC 286 /* LD/ST to bits 11:3, S+A, NC */
+#define R_AARCH64_LDST8_ABS_LO12_NC 278 /* LD/ST to bits 11:0, S+A, NC */
+#define R_AARCH64_LDST128_ABS_LO12_NC 299
+
+#define R_AARCH64_MOVW_PREL_G0 287
+#define R_AARCH64_MOVW_PREL_G0_NC 288
+#define R_AARCH64_MOVW_PREL_G1 289
+#define R_AARCH64_MOVW_PREL_G1_NC 290
+#define R_AARCH64_MOVW_PREL_G2 291
+#define R_AARCH64_MOVW_PREL_G2_NC 292
+#define R_AARCH64_MOVW_PREL_G3 293
+
/* Program Header */
typedef struct {
Elf32_Word p_type; /* segment type */
@@ -14,6 +14,15 @@
#define NULL ((void*)0)
#endif
+#define INT16_MIN (-32767-1)
+#define INT32_MIN (-2147483647-1)
+
+#define INT16_MAX (32767)
+#define INT32_MAX (2147483647)
+
+#define UINT16_MAX (65535)
+#define UINT32_MAX (4294967295U)
+
#define INT_MAX ((int)(~0U>>1))
#define INT_MIN (-INT_MAX - 1)
#define UINT_MAX (~0U)