--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -6,6 +6,7 @@
#include <xen/lib.h>
#include <xen/livepatch_elf.h>
#include <xen/livepatch.h>
+#include <xen/pfn.h>
#include <xen/vmap.h>
#include <asm/cpufeature.h>
@@ -17,13 +18,15 @@
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
void *vmap_of_xen_text;
+struct livepatch_va livepatch_stash;
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *func, size_t nfuncs)
{
mfn_t text_mfn;
unsigned int text_order;
+ int rc = 0;
- if ( vmap_of_xen_text )
+ if ( vmap_of_xen_text || livepatch_stash.va )
return -EINVAL;
text_mfn = virt_to_mfn(_start);
@@ -43,7 +46,29 @@ int arch_livepatch_quiesce(void)
return -ENOMEM;
}
- return 0;
+ if ( nfuncs )
+ {
+ unsigned long va = (unsigned long)func;
+ unsigned int offs = va & (PAGE_SIZE - 1);
+ unsigned int pages = PFN_UP(offs + nfuncs * sizeof(*func));
+
+ va &= PAGE_MASK;
+
+ rc = modify_xen_mappings(va, va + (pages * PAGE_SIZE), PTE_NX);
+ if ( rc )
+ {
+ printk(XENLOG_ERR LIVEPATCH "Failed to modify 0x%lx to RW\n", va);
+ vunmap(vmap_of_xen_text);
+ vmap_of_xen_text = NULL;
+ }
+ else
+ {
+ livepatch_stash.va = va;
+ livepatch_stash.pages = pages;
+ }
+ }
+
+ return rc;
}
void arch_livepatch_revive(void)
@@ -58,6 +83,20 @@ void arch_livepatch_revive(void)
vunmap(vmap_of_xen_text);
vmap_of_xen_text = NULL;
+
+ if ( livepatch_stash.va )
+ {
+ unsigned long va = livepatch_stash.va;
+
+ int rc = modify_xen_mappings(va, va + (livepatch_stash.pages * PAGE_SIZE),
+ PTE_NX | PTE_RO);
+ if ( rc )
+ printk(XENLOG_ERR LIVEPATCH "Failed to modify %lx to RO!\n", va);
+
+ livepatch_stash.va = 0;
+ livepatch_stash.pages = 0;
+
+ }
}
int arch_livepatch_verify_func(const struct livepatch_func *func)
--- a/xen/arch/x86/livepatch.c
+++ b/xen/arch/x86/livepatch.c
@@ -14,7 +14,7 @@
#include <asm/nmi.h>
#include <asm/livepatch.h>
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *func, size_t nfuncs)
{
/* Disable WP to allow changes to read-only pages. */
write_cr0(read_cr0() & ~X86_CR0_WP);
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -56,6 +56,7 @@ struct payload {
int32_t rc; /* 0 or -XEN_EXX. */
bool reverted; /* Whether it was reverted. */
bool safe_to_reapply; /* Can apply safely after revert. */
+ bool funcs_ro; /* Are livepatch.funcs in .rodata section. */
struct list_head list; /* Linked to 'payload_list'. */
const void *text_addr; /* Virtual address of .text. */
size_t text_size; /* .. and its size. */
@@ -538,6 +539,10 @@ static int prepare_payload(struct payload *payload,
payload->funcs = sec->load_addr;
payload->nfuncs = sec->sec->sh_size / sizeof(*payload->funcs);
+ if ( sec->load_addr >= payload->ro_addr &&
+ sec->load_addr < (payload->ro_addr + payload->ro_size) )
+ payload->funcs_ro = true;
+
for ( i = 0; i < payload->nfuncs; i++ )
{
int rc;
@@ -1070,7 +1075,7 @@ static int apply_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
data->name, data->nfuncs);
- rc = arch_livepatch_quiesce();
+ rc = arch_livepatch_quiesce(data->funcs, data->funcs_ro ? data->nfuncs : 0);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
@@ -1111,7 +1116,7 @@ static int revert_payload(struct payload *data)
printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);
- rc = arch_livepatch_quiesce();
+ rc = arch_livepatch_quiesce(data->funcs, data->funcs_ro ? data->nfuncs : 0);
if ( rc )
{
printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
--- a/xen/include/asm-arm/livepatch.h
+++ b/xen/include/asm-arm/livepatch.h
@@ -17,6 +17,19 @@
*/
extern void *vmap_of_xen_text;
+/*
+ * The va of the livepatch .livepatch.funcs. Only used if said
+ * region is in RO VA and we need to modify it to RW during
+ * livepatching.
+ */
+struct livepatch_va
+{
+ unsigned long va;
+ unsigned int pages;
+};
+
+extern struct livepatch_va livepatch_stash;
+
/* These ranges are only for unconditional branches. */
#ifdef CONFIG_ARM_32
/* ARM32: A4.3 IN ARM DDI 0406C.c - we are using only ARM instructions in Xen.*/
--- a/xen/include/xen/livepatch.h
+++ b/xen/include/xen/livepatch.h
@@ -104,7 +104,7 @@ static inline int livepatch_verify_distance(const struct livepatch_func *func)
* These functions are called around the critical region patching live code,
* for an architecture to make appropriate global state adjustments.
*/
-int arch_livepatch_quiesce(void);
+int arch_livepatch_quiesce(struct livepatch_func *, size_t nfunc);
void arch_livepatch_revive(void);
void arch_livepatch_apply(struct livepatch_func *func);
This was found when porting livepatch-build-tools to ARM64/32.

When livepatches are built with livepatch-build-tools (and the test-case,
thanks to "livepatch/tests: Make sure all .livepatch.funcs sections are
read-only"), the .livepatch.funcs section is read-only. However the
hypervisor uses the 'opaque' member for its own purpose, namely stashing
the original code. But the .livepatch.funcs section is in the RO vmap
area, so on ARM[32,64] we get a fault (as secure_payload changes the VA
to RO).

On x86 the same protection is in place: in arch_livepatch_quiesce we
disable WP to allow changes to read-only pages (and in
arch_livepatch_revive we re-enable the WP protection).

On ARM[32,64] we do not have the luxury of a global register that can be
changed after boot. In lieu of that we change the mapping of the VA
containing .livepatch.funcs to RW, and change it back to RO once we are
done patching.

To do this we stash, for the duration of livepatching, the VA of the page
in which .livepatch.funcs resides and the number of pages the section
spans (at least 1). Equipped with that we can patch livepatches whose
.livepatch.funcs lives in a read-only section.

An alternative would be to add the 'W' flag to .livepatch.funcs during
loading, which would place the section in a writeable region from the
get-go.

Suggested-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
Cc: Ross Lagerwall <ross.lagerwall@citrix.com>

v4: First posting
---
 xen/arch/arm/livepatch.c        | 45 ++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/livepatch.c        |  2 +-
 xen/common/livepatch.c          |  9 +++++++--
 xen/include/asm-arm/livepatch.h | 13 ++++++++++++
 xen/include/xen/livepatch.h     |  2 +-
 5 files changed, 64 insertions(+), 7 deletions(-)
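For reference only (not part of the patch): below is a minimal user-space
sketch of the RO/RW toggling described above, using POSIX mprotect() in
place of modify_xen_mappings() and plain page-size arithmetic in place of
PFN_UP()/PAGE_MASK. The names livepatch_va_demo, open_region and
close_region, as well as the mmap'd stand-in region, are hypothetical; the
sketch only illustrates stashing the page-aligned VA and page count,
opening the range RW around the write, and restoring it to RO afterwards.

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Stand-in for the stash of the RO region we temporarily opened up. */
struct livepatch_va_demo {
    unsigned long va;    /* Page-aligned start of the region. */
    unsigned int pages;  /* Number of pages covering it. */
};

/*
 * Make the pages covering [funcs, funcs + len) writeable, mirroring the
 * offset/PFN_UP arithmetic used in arch_livepatch_quiesce().
 */
static int open_region(void *funcs, size_t len, struct livepatch_va_demo *stash)
{
    unsigned long page_size = sysconf(_SC_PAGESIZE);
    unsigned long va = (unsigned long)funcs;
    unsigned long offs = va & (page_size - 1);                       /* Offset within first page. */
    unsigned int pages = (offs + len + page_size - 1) / page_size;   /* PFN_UP() analogue. */

    va &= ~(page_size - 1);                                          /* PAGE_MASK analogue. */

    if ( mprotect((void *)va, pages * page_size, PROT_READ | PROT_WRITE) )
        return -1;

    stash->va = va;
    stash->pages = pages;
    return 0;
}

/* Restore the stashed region to read-only, as arch_livepatch_revive() does. */
static int close_region(struct livepatch_va_demo *stash)
{
    unsigned long page_size = sysconf(_SC_PAGESIZE);
    int rc = mprotect((void *)stash->va, stash->pages * page_size, PROT_READ);

    stash->va = 0;
    stash->pages = 0;
    return rc;
}

int main(void)
{
    unsigned long page_size = sysconf(_SC_PAGESIZE);
    /* A read-only page standing in for the payload's .livepatch.funcs. */
    char *ro = mmap(NULL, page_size, PROT_READ,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    struct livepatch_va_demo stash = { 0 };

    if ( ro == MAP_FAILED || open_region(ro + 16, 64, &stash) )
        return 1;

    memcpy(ro + 16, "stashed opaque bytes", 20);  /* Would fault without the mprotect(). */

    return close_region(&stash) ? 1 : 0;
}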