diff mbox

[v4,10/11] livepatch/arm[32, 64]: Modify .livepatch.funcs section to be RW when patching

Message ID 20170920223148.13137-11-konrad.wilk@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Konrad Rzeszutek Wilk Sept. 20, 2017, 10:31 p.m. UTC
This was found when porting livepatch-build-tools to ARM64/32

When livepatch-build-tools are built (and test-case thanks to:
livepatch/tests: Make sure all .livepatch.funcs sections are read-only)
the .livepatch.funcs are in read-only section.

However the hypervisor uses the 'opaque' for its own purpose, that
is stashing the original code. But the .livepatch.funcs section is
in the RO vmap area so on ARM[32,64] we get a fault (as the
secure_payload changes the VA to RO).

On x86 the same protection is in place. In 'arch_livepatch_quiesce'
we disable WP to allow changes to read-only pages (and in arch_livepatch_revive
we re-enable the WP protection).

On ARM[32,64] we do not have the luxury of a global register that can
be changed after boot. In lieu of that we modify the VA of where
the .livepatch.funcs is to RW. After we are done with patching
we change it back to RO.

To do this we need to stash during livepatching the va of the page
in which the .livepatch.funcs resides, and the number of pages said
section uses (or 1 at min).

Equipped with that we can patch livepatch payloads which have
.livepatch.funcs in the rodata section.

An alternative is to add the 'W' flag during loading of the
.livepatch.funcs section, which would result in the section being in a
writable region from the get-go.

Suggested-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
Cc: Ross Lagerwall <ross.lagerwall@citrix.com>

v4: First posting
---
 xen/arch/arm/livepatch.c        | 45 ++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/livepatch.c        |  2 +-
 xen/common/livepatch.c          |  9 +++++++--
 xen/include/asm-arm/livepatch.h | 13 ++++++++++++
 xen/include/xen/livepatch.h     |  2 +-
 5 files changed, 64 insertions(+), 7 deletions(-)

Comments

Jan Beulich Sept. 21, 2017, 12:16 p.m. UTC | #1
>>> On 21.09.17 at 00:31, <konrad@kernel.org> wrote:
> @@ -43,7 +46,29 @@ int arch_livepatch_quiesce(void)
>          return -ENOMEM;
>      }
>  
> -    return 0;
> +    if ( nfuncs )
> +    {
> +        unsigned long va = (unsigned long)func;
> +        unsigned int offs = va & (PAGE_SIZE - 1);
> +        unsigned int pages = PFN_UP(offs + nfuncs * sizeof(*func));
> +
> +        va &= PAGE_MASK;
> +
> +        rc = modify_xen_mappings(va, va + (pages * PAGE_SIZE), PTE_NX);
> +        if ( rc )
> +        {
> +            printk(XENLOG_ERR LIVEPATCH "Failed to modify 0x%lx to RW\n", va);

%#lx ?

> +            vunmap(vmap_of_xen_text);
> +            vmap_of_xen_text = NULL;
> +        }
> +        else
> +        {
> +            livepatch_stash.va = va;
> +            livepatch_stash.pages = pages;
> +        }
> +    }

You're effectively doing all this behind the back of vmalloc() / vmap();
I'm not sure this is a good idea, but I'm also not a maintainer of this
code.

Jan
Julien Grall Sept. 21, 2017, 2:58 p.m. UTC | #2
Hi Jan,

On 21/09/17 13:16, Jan Beulich wrote:
>>>> On 21.09.17 at 00:31, <konrad@kernel.org> wrote:
>> @@ -43,7 +46,29 @@ int arch_livepatch_quiesce(void)
>>           return -ENOMEM;
>>       }
>>   
>> -    return 0;
>> +    if ( nfuncs )
>> +    {
>> +        unsigned long va = (unsigned long)func;
>> +        unsigned int offs = va & (PAGE_SIZE - 1);
>> +        unsigned int pages = PFN_UP(offs + nfuncs * sizeof(*func));
>> +
>> +        va &= PAGE_MASK;
>> +
>> +        rc = modify_xen_mappings(va, va + (pages * PAGE_SIZE), PTE_NX);
>> +        if ( rc )
>> +        {
>> +            printk(XENLOG_ERR LIVEPATCH "Failed to modify 0x%lx to RW\n", va);
> 
> %#lx ?
> 
>> +            vunmap(vmap_of_xen_text);
>> +            vmap_of_xen_text = NULL;
>> +        }
>> +        else
>> +        {
>> +            livepatch_stash.va = va;
>> +            livepatch_stash.pages = pages;
>> +        }
>> +    }
> 
> You're effectively doing all this behind the back of vmalloc() / vmap();
> I'm not sure this is a good idea, but I'm also not a maintainer of this
> code.

We already have places in the code (both x86 and Arm) that modify memory 
attributes behind the back of vmalloc/vmap. See arch_livepatch_secure for 
instance.

I suggested this solution because it avoids creating a temporary 
mapping for the .livepatch.funcs section.

Do you foresee any potential issue with temporarily modifying the 
permissions of a mapping?

Cheers,
Jan Beulich Sept. 21, 2017, 3:09 p.m. UTC | #3
>>> On 21.09.17 at 16:58, <julien.grall@arm.com> wrote:
> Do you foresee any potential issue with temporarily modifying the 
> permissions of a mapping?

It's generally not a good idea imo, but perhaps it's fine here. I
assume the page permissions can't be adversely affected despite
their hard coding in the function invocations.

Jan
diff mbox

Patch

diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 76723f1f1a..f23df8597d 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -6,6 +6,7 @@ 
 #include <xen/lib.h>
 #include <xen/livepatch_elf.h>
 #include <xen/livepatch.h>
+#include <xen/pfn.h>
 #include <xen/vmap.h>
 
 #include <asm/cpufeature.h>
@@ -17,13 +18,15 @@ 
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
 
 void *vmap_of_xen_text;
+struct livepatch_va livepatch_stash;
 
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *func, size_t nfuncs)
 {
     mfn_t text_mfn;
     unsigned int text_order;
+    int rc = 0;
 
-    if ( vmap_of_xen_text )
+    if ( vmap_of_xen_text || livepatch_stash.va )
         return -EINVAL;
 
     text_mfn = virt_to_mfn(_start);
@@ -43,7 +46,29 @@  int arch_livepatch_quiesce(void)
         return -ENOMEM;
     }
 
-    return 0;
+    if ( nfuncs )
+    {
+        unsigned long va = (unsigned long)func;
+        unsigned int offs = va & (PAGE_SIZE - 1);
+        unsigned int pages = PFN_UP(offs + nfuncs * sizeof(*func));
+
+        va &= PAGE_MASK;
+
+        rc = modify_xen_mappings(va, va + (pages * PAGE_SIZE), PTE_NX);
+        if ( rc )
+        {
+            printk(XENLOG_ERR LIVEPATCH "Failed to modify 0x%lx to RW\n", va);
+            vunmap(vmap_of_xen_text);
+            vmap_of_xen_text = NULL;
+        }
+        else
+        {
+            livepatch_stash.va = va;
+            livepatch_stash.pages = pages;
+        }
+    }
+
+    return rc;
 }
 
 void arch_livepatch_revive(void)
@@ -58,6 +83,20 @@  void arch_livepatch_revive(void)
         vunmap(vmap_of_xen_text);
 
     vmap_of_xen_text = NULL;
+
+    if ( livepatch_stash.va )
+    {
+        unsigned long va = livepatch_stash.va;
+
+        int rc = modify_xen_mappings(va, va + (livepatch_stash.pages * PAGE_SIZE),
+                                     PTE_NX | PTE_RO);
+        if ( rc )
+            printk(XENLOG_ERR LIVEPATCH "Failed to modify %lx to RO!\n", va);
+
+        livepatch_stash.va = 0;
+        livepatch_stash.pages = 0;
+
+    }
 }
 
 int arch_livepatch_verify_func(const struct livepatch_func *func)
diff --git a/xen/arch/x86/livepatch.c b/xen/arch/x86/livepatch.c
index 48d20fdacd..f5865370d5 100644
--- a/xen/arch/x86/livepatch.c
+++ b/xen/arch/x86/livepatch.c
@@ -14,7 +14,7 @@ 
 #include <asm/nmi.h>
 #include <asm/livepatch.h>
 
-int arch_livepatch_quiesce(void)
+int arch_livepatch_quiesce(struct livepatch_func *func, size_t nfuncs)
 {
     /* Disable WP to allow changes to read-only pages. */
     write_cr0(read_cr0() & ~X86_CR0_WP);
diff --git a/xen/common/livepatch.c b/xen/common/livepatch.c
index f736c3a7ea..af8998ec8d 100644
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -56,6 +56,7 @@  struct payload {
     int32_t rc;                          /* 0 or -XEN_EXX. */
     bool reverted;                       /* Whether it was reverted. */
     bool safe_to_reapply;                /* Can apply safely after revert. */
+    bool funcs_ro;                       /* Are livepatch.funcs in .rodata section. */
     struct list_head list;               /* Linked to 'payload_list'. */
     const void *text_addr;               /* Virtual address of .text. */
     size_t text_size;                    /* .. and its size. */
@@ -538,6 +539,10 @@  static int prepare_payload(struct payload *payload,
     payload->funcs = sec->load_addr;
     payload->nfuncs = sec->sec->sh_size / sizeof(*payload->funcs);
 
+    if ( sec->load_addr >= payload->ro_addr &&
+         sec->load_addr < (payload->ro_addr + payload->ro_size) )
+        payload->funcs_ro = true;
+
     for ( i = 0; i < payload->nfuncs; i++ )
     {
         int rc;
@@ -1070,7 +1075,7 @@  static int apply_payload(struct payload *data)
     printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
             data->name, data->nfuncs);
 
-    rc = arch_livepatch_quiesce();
+    rc = arch_livepatch_quiesce(data->funcs, data->funcs_ro ? data->nfuncs : 0);
     if ( rc )
     {
         printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
@@ -1111,7 +1116,7 @@  static int revert_payload(struct payload *data)
 
     printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);
 
-    rc = arch_livepatch_quiesce();
+    rc = arch_livepatch_quiesce(data->funcs, data->funcs_ro ? data->nfuncs : 0);
     if ( rc )
     {
         printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
diff --git a/xen/include/asm-arm/livepatch.h b/xen/include/asm-arm/livepatch.h
index 6bca79deb9..cd13ca786d 100644
--- a/xen/include/asm-arm/livepatch.h
+++ b/xen/include/asm-arm/livepatch.h
@@ -17,6 +17,19 @@ 
  */
 extern void *vmap_of_xen_text;
 
+/*
+ * The va of the livepatch .livepatch.funcs. Only used if said
+ * region is in RO VA and we need to modify it to RW during
+ * livepatching.
+ */
+struct livepatch_va
+{
+    unsigned long va;
+    unsigned int pages;
+};
+
+extern struct livepatch_va livepatch_stash;
+
 /* These ranges are only for unconditional branches. */
 #ifdef CONFIG_ARM_32
 /* ARM32: A4.3 IN ARM DDI 0406C.c -  we are using only ARM instructions in Xen.*/
diff --git a/xen/include/xen/livepatch.h b/xen/include/xen/livepatch.h
index e9bab87f28..4aceaab637 100644
--- a/xen/include/xen/livepatch.h
+++ b/xen/include/xen/livepatch.h
@@ -104,7 +104,7 @@  static inline int livepatch_verify_distance(const struct livepatch_func *func)
  * These functions are called around the critical region patching live code,
  * for an architecture to take make appropratie global state adjustments.
  */
-int arch_livepatch_quiesce(void);
+int arch_livepatch_quiesce(struct livepatch_func *, size_t nfunc);
 void arch_livepatch_revive(void);
 
 void arch_livepatch_apply(struct livepatch_func *func);