[v11,13/20] target/riscv: mmu changes for zicfiss shadow stack protection

Message ID 20240828174739.714313-14-debug@rivosinc.com (mailing list archive)
State New, archived
Series riscv support for control flow integrity extensions

Commit Message

Deepak Gupta Aug. 28, 2024, 5:47 p.m. UTC
zicfiss protects the shadow stack using a new page table encoding:
PTE.W=1, PTE.R=0 and PTE.X=0. This encoding is reserved if zicfiss is
not implemented or if shadow stacks are not enabled.
Loads from shadow stack memory are allowed, while regular stores to
shadow stack memory lead to access faults. Shadow stack accesses to
read-only memory lead to store page faults.

To implement the special nature of shadow stack memory, where only
selected stores (shadow stack stores from sspush) are allowed while
all regular stores are disallowed, a new MMU TLB index is created for
the shadow stack.

Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/cpu_helper.c | 37 +++++++++++++++++++++++++++++++------
 target/riscv/internals.h  |  3 +++
 2 files changed, 34 insertions(+), 6 deletions(-)
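
As a rough illustration of the mechanism described above (not code from
this series), a shadow stack store helper might select the new TLB index
roughly as follows. The helper name, the riscv_env_mmu_index() usage and
the store width are assumptions for the sketch:

    /*
     * Illustrative sketch only: a shadow stack store (e.g. from an
     * sspush helper) issues its access with MMU_IDX_SS_WRITE or'ed
     * into the MMU index, so it hits the shadow-stack TLB entry that
     * carries PAGE_WRITE.  Regular stores use the normal data index
     * and therefore fault on shadow stack pages.
     */
    static void sspush_sketch(CPURISCVState *env, target_ulong addr,
                              target_ulong val, uintptr_t ra)
    {
        int mmu_idx = riscv_env_mmu_index(env, false) | MMU_IDX_SS_WRITE;

        cpu_stl_mmuidx_ra(env, addr, val, mmu_idx, ra);
    }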

Comments

Alistair Francis Aug. 28, 2024, 11:29 p.m. UTC | #1
On Thu, Aug 29, 2024 at 3:49 AM Deepak Gupta <debug@rivosinc.com> wrote:
>
> zicfiss protects shadow stack using new page table encodings PTE.W=1,
> PTE.R=0 and PTE.X=0. This encoding is reserved if zicfiss is not
> implemented or if shadow stack are not enabled.
> Loads on shadow stack memory are allowed while stores to shadow stack
> memory leads to access faults. Shadow stack accesses to RO memory
> leads to store page fault.
>
> To implement special nature of shadow stack memory where only selected
> stores (shadow stack stores from sspush) have to be allowed while rest
> of regular stores disallowed, new MMU TLB index is created for shadow
> stack.
>
> Signed-off-by: Deepak Gupta <debug@rivosinc.com>
> Suggested-by: Richard Henderson <richard.henderson@linaro.org>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/riscv/cpu_helper.c | 37 +++++++++++++++++++++++++++++++------
>  target/riscv/internals.h  |  3 +++
>  2 files changed, 34 insertions(+), 6 deletions(-)
>
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index be4ac3d54e..39544cade6 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -893,6 +893,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
>      hwaddr ppn;
>      int napot_bits = 0;
>      target_ulong napot_mask;
> +    bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
> +    bool sstack_page = false;
>
>      /*
>       * Check if we should use the background registers for the two
> @@ -1101,21 +1103,36 @@ restart:
>          return TRANSLATE_FAIL;
>      }
>
> +    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
>      /* Check for reserved combinations of RWX flags. */
> -    switch (pte & (PTE_R | PTE_W | PTE_X)) {
> -    case PTE_W:
> +    switch (rwx) {
>      case PTE_W | PTE_X:
>          return TRANSLATE_FAIL;
> +    case PTE_W:
> +        /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
> +        if (cpu_get_bcfien(env) && first_stage) {
> +            sstack_page = true;
> +            /* if ss index, read and write allowed. else only read allowed */
> +            rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
> +            break;
> +        }
> +        return TRANSLATE_FAIL;
> +    case PTE_R:
> +        /* shadow stack writes to readonly memory are page faults */
> +        if (is_sstack_idx && access_type == MMU_DATA_STORE) {
> +            return TRANSLATE_FAIL;
> +        }
> +        break;
>      }
>
>      int prot = 0;
> -    if (pte & PTE_R) {
> +    if (rwx & PTE_R) {
>          prot |= PAGE_READ;
>      }
> -    if (pte & PTE_W) {
> +    if (rwx & PTE_W) {
>          prot |= PAGE_WRITE;
>      }
> -    if (pte & PTE_X) {
> +    if (rwx & PTE_X) {
>          bool mxr = false;
>
>          /*
> @@ -1160,7 +1177,7 @@ restart:
>
>      if (!((prot >> access_type) & 1)) {
>          /* Access check failed */
> -        return TRANSLATE_FAIL;
> +        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;

Why is it a PMP error if it's a shadow stack page?

Alistair
Deepak Gupta Aug. 28, 2024, 11:45 p.m. UTC | #2
On Thu, Aug 29, 2024 at 09:29:49AM +1000, Alistair Francis wrote:
>On Thu, Aug 29, 2024 at 3:49 AM Deepak Gupta <debug@rivosinc.com> wrote:
>>
>> zicfiss protects shadow stack using new page table encodings PTE.W=1,
>> PTE.R=0 and PTE.X=0. This encoding is reserved if zicfiss is not
>> implemented or if shadow stack are not enabled.
>> Loads on shadow stack memory are allowed while stores to shadow stack
>> memory leads to access faults. Shadow stack accesses to RO memory
>> leads to store page fault.
>>
>> To implement special nature of shadow stack memory where only selected
>> stores (shadow stack stores from sspush) have to be allowed while rest
>> of regular stores disallowed, new MMU TLB index is created for shadow
>> stack.
>>
>> Signed-off-by: Deepak Gupta <debug@rivosinc.com>
>> Suggested-by: Richard Henderson <richard.henderson@linaro.org>
>> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  target/riscv/cpu_helper.c | 37 +++++++++++++++++++++++++++++++------
>>  target/riscv/internals.h  |  3 +++
>>  2 files changed, 34 insertions(+), 6 deletions(-)
>>
>> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
>> index be4ac3d54e..39544cade6 100644
>> --- a/target/riscv/cpu_helper.c
>> +++ b/target/riscv/cpu_helper.c
>> @@ -893,6 +893,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
>>      hwaddr ppn;
>>      int napot_bits = 0;
>>      target_ulong napot_mask;
>> +    bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
>> +    bool sstack_page = false;
>>
>>      /*
>>       * Check if we should use the background registers for the two
>> @@ -1101,21 +1103,36 @@ restart:
>>          return TRANSLATE_FAIL;
>>      }
>>
>> +    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
>>      /* Check for reserved combinations of RWX flags. */
>> -    switch (pte & (PTE_R | PTE_W | PTE_X)) {
>> -    case PTE_W:
>> +    switch (rwx) {
>>      case PTE_W | PTE_X:
>>          return TRANSLATE_FAIL;
>> +    case PTE_W:
>> +        /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
>> +        if (cpu_get_bcfien(env) && first_stage) {
>> +            sstack_page = true;
>> +            /* if ss index, read and write allowed. else only read allowed */
>> +            rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
>> +            break;
>> +        }
>> +        return TRANSLATE_FAIL;
>> +    case PTE_R:
>> +        /* shadow stack writes to readonly memory are page faults */
>> +        if (is_sstack_idx && access_type == MMU_DATA_STORE) {

While responding to your question, I noticed there is a bug here. It's a
leftover from previous patches where I was promoting shadow stack loads to
stores. There is no need to check `access_type == MMU_DATA_STORE` because we
store unwind information as part of TCG compilation.

Will fix it.
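
A sketch of the described fix, dropping the access_type check (illustrative
only, not the actual respin):

    case PTE_R:
        /*
         * Shadow stack accesses to read-only memory are page faults.
         * No access_type check is needed: only shadow stack stores
         * run with MMU_IDX_SS_WRITE set, so is_sstack_idx alone is
         * enough to identify them.
         */
        if (is_sstack_idx) {
            return TRANSLATE_FAIL;
        }
        break;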

>> +            return TRANSLATE_FAIL;
>> +        }
>> +        break;
>>      }
>>
>>      int prot = 0;
>> -    if (pte & PTE_R) {
>> +    if (rwx & PTE_R) {
>>          prot |= PAGE_READ;
>>      }
>> -    if (pte & PTE_W) {
>> +    if (rwx & PTE_W) {
>>          prot |= PAGE_WRITE;
>>      }
>> -    if (pte & PTE_X) {
>> +    if (rwx & PTE_X) {
>>          bool mxr = false;
>>
>>          /*
>> @@ -1160,7 +1177,7 @@ restart:
>>
>>      if (!((prot >> access_type) & 1)) {
>>          /* Access check failed */
>> -        return TRANSLATE_FAIL;
>> +        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
>
>Why is it a PMP error if it's a shadow stack page?

A shadow stack page is readable by regular loads.
We make sure of that in the `case PTE_W` arm of the switch above.
But a shadow stack page is not writable via regular stores, and such
stores must raise an access fault. The return code `TRANSLATE_PMP_FAIL`
is translated to an access fault when the fault is raised.
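
For reference, a simplified sketch of that conversion, condensed from the
existing raise_mmu_exception() logic (details abridged):

    /*
     * Simplified sketch: riscv_cpu_tlb_fill() sets pmp_violation when
     * the translation returns TRANSLATE_PMP_FAIL, and the raise path
     * then reports an access fault instead of a page fault.
     */
    static void sketch_raise_store_fault(CPUState *cs, bool pmp_violation)
    {
        cs->exception_index = pmp_violation ?
                              RISCV_EXCP_STORE_AMO_ACCESS_FAULT :
                              RISCV_EXCP_STORE_PAGE_FAULT;
    }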

>
>Alistair
Alistair Francis Aug. 29, 2024, 12:03 a.m. UTC | #3
On Thu, Aug 29, 2024 at 9:45 AM Deepak Gupta <debug@rivosinc.com> wrote:
>
> On Thu, Aug 29, 2024 at 09:29:49AM +1000, Alistair Francis wrote:
> >On Thu, Aug 29, 2024 at 3:49 AM Deepak Gupta <debug@rivosinc.com> wrote:
> >>
> >> zicfiss protects shadow stack using new page table encodings PTE.W=1,
> >> PTE.R=0 and PTE.X=0. This encoding is reserved if zicfiss is not
> >> implemented or if shadow stack are not enabled.
> >> Loads on shadow stack memory are allowed while stores to shadow stack
> >> memory leads to access faults. Shadow stack accesses to RO memory
> >> leads to store page fault.
> >>
> >> To implement special nature of shadow stack memory where only selected
> >> stores (shadow stack stores from sspush) have to be allowed while rest
> >> of regular stores disallowed, new MMU TLB index is created for shadow
> >> stack.
> >>
> >> Signed-off-by: Deepak Gupta <debug@rivosinc.com>
> >> Suggested-by: Richard Henderson <richard.henderson@linaro.org>
> >> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> >> ---
> >>  target/riscv/cpu_helper.c | 37 +++++++++++++++++++++++++++++++------
> >>  target/riscv/internals.h  |  3 +++
> >>  2 files changed, 34 insertions(+), 6 deletions(-)
> >>
> >> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> >> index be4ac3d54e..39544cade6 100644
> >> --- a/target/riscv/cpu_helper.c
> >> +++ b/target/riscv/cpu_helper.c
> >> @@ -893,6 +893,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
> >>      hwaddr ppn;
> >>      int napot_bits = 0;
> >>      target_ulong napot_mask;
> >> +    bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
> >> +    bool sstack_page = false;
> >>
> >>      /*
> >>       * Check if we should use the background registers for the two
> >> @@ -1101,21 +1103,36 @@ restart:
> >>          return TRANSLATE_FAIL;
> >>      }
> >>
> >> +    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
> >>      /* Check for reserved combinations of RWX flags. */
> >> -    switch (pte & (PTE_R | PTE_W | PTE_X)) {
> >> -    case PTE_W:
> >> +    switch (rwx) {
> >>      case PTE_W | PTE_X:
> >>          return TRANSLATE_FAIL;
> >> +    case PTE_W:
> >> +        /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
> >> +        if (cpu_get_bcfien(env) && first_stage) {
> >> +            sstack_page = true;
> >> +            /* if ss index, read and write allowed. else only read allowed */
> >> +            rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
> >> +            break;
> >> +        }
> >> +        return TRANSLATE_FAIL;
> >> +    case PTE_R:
> >> +        /* shadow stack writes to readonly memory are page faults */
> >> +        if (is_sstack_idx && access_type == MMU_DATA_STORE) {
>
> While responding to your question, I noticed there is a bug here. Its a leftover from
> previous patches where I was promoting shadow stack loads to stores. No need to check
> `access_type == MMU_DATA_STORE` because we store unwind information as part of tcg
> compile.
>
> Will fix it.
>
> >> +            return TRANSLATE_FAIL;
> >> +        }
> >> +        break;
> >>      }
> >>
> >>      int prot = 0;
> >> -    if (pte & PTE_R) {
> >> +    if (rwx & PTE_R) {
> >>          prot |= PAGE_READ;
> >>      }
> >> -    if (pte & PTE_W) {
> >> +    if (rwx & PTE_W) {
> >>          prot |= PAGE_WRITE;
> >>      }
> >> -    if (pte & PTE_X) {
> >> +    if (rwx & PTE_X) {
> >>          bool mxr = false;
> >>
> >>          /*
> >> @@ -1160,7 +1177,7 @@ restart:
> >>
> >>      if (!((prot >> access_type) & 1)) {
> >>          /* Access check failed */
> >> -        return TRANSLATE_FAIL;
> >> +        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
> >
> >Why is it a PMP error if it's a shadow stack page?
>
> A shadow stack page is readable by regular loads.
> We are making sure of that in `case PTE_W` in above switch case.
> But shadow stack page is not writeable via regular stores. And must raise
> access fault. return code `TRANSLATE_PMP_FAIL` is translated to access fault
> while raising fault.

Ah, ok. It's worth commenting that we are returning TRANSLATE_PMP_FAIL
as that will be translated to an access fault.
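
For instance, a comment along these lines (wording is only a suggestion):

    /*
     * A regular store to a shadow stack page must raise an access
     * fault rather than a page fault, so return TRANSLATE_PMP_FAIL;
     * it is converted to an access fault when the exception is
     * raised.
     */
    return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;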

Alistair

>
> >
> >Alistair
Deepak Gupta Aug. 29, 2024, 12:17 a.m. UTC | #4
On Thu, Aug 29, 2024 at 10:03:04AM +1000, Alistair Francis wrote:
>On Thu, Aug 29, 2024 at 9:45 AM Deepak Gupta <debug@rivosinc.com> wrote:
>>
>> On Thu, Aug 29, 2024 at 09:29:49AM +1000, Alistair Francis wrote:
>> >On Thu, Aug 29, 2024 at 3:49 AM Deepak Gupta <debug@rivosinc.com> wrote:
>> >>
>> >>          prot |= PAGE_WRITE;
>> >>      }
>> >> -    if (pte & PTE_X) {
>> >> +    if (rwx & PTE_X) {
>> >>          bool mxr = false;
>> >>
>> >>          /*
>> >> @@ -1160,7 +1177,7 @@ restart:
>> >>
>> >>      if (!((prot >> access_type) & 1)) {
>> >>          /* Access check failed */
>> >> -        return TRANSLATE_FAIL;
>> >> +        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
>> >
>> >Why is it a PMP error if it's a shadow stack page?
>>
>> A shadow stack page is readable by regular loads.
>> We are making sure of that in `case PTE_W` in above switch case.
>> But shadow stack page is not writeable via regular stores. And must raise
>> access fault. return code `TRANSLATE_PMP_FAIL` is translated to access fault
>> while raising fault.
>
>Ah, ok. It's worth commenting that we are returning TRANSLATE_PMP_FAIL
>as that will be translated to an access fault

Ack.

>
>Alistair
>
>>
>> >
>> >Alistair

Patch

diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index be4ac3d54e..39544cade6 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -893,6 +893,8 @@  static int get_physical_address(CPURISCVState *env, hwaddr *physical,
     hwaddr ppn;
     int napot_bits = 0;
     target_ulong napot_mask;
+    bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+    bool sstack_page = false;
 
     /*
      * Check if we should use the background registers for the two
@@ -1101,21 +1103,36 @@  restart:
         return TRANSLATE_FAIL;
     }
 
+    target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
     /* Check for reserved combinations of RWX flags. */
-    switch (pte & (PTE_R | PTE_W | PTE_X)) {
-    case PTE_W:
+    switch (rwx) {
     case PTE_W | PTE_X:
         return TRANSLATE_FAIL;
+    case PTE_W:
+        /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
+        if (cpu_get_bcfien(env) && first_stage) {
+            sstack_page = true;
+            /* if ss index, read and write allowed. else only read allowed */
+            rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
+            break;
+        }
+        return TRANSLATE_FAIL;
+    case PTE_R:
+        /* shadow stack writes to readonly memory are page faults */
+        if (is_sstack_idx && access_type == MMU_DATA_STORE) {
+            return TRANSLATE_FAIL;
+        }
+        break;
     }
 
     int prot = 0;
-    if (pte & PTE_R) {
+    if (rwx & PTE_R) {
         prot |= PAGE_READ;
     }
-    if (pte & PTE_W) {
+    if (rwx & PTE_W) {
         prot |= PAGE_WRITE;
     }
-    if (pte & PTE_X) {
+    if (rwx & PTE_X) {
         bool mxr = false;
 
         /*
@@ -1160,7 +1177,7 @@  restart:
 
     if (!((prot >> access_type) & 1)) {
         /* Access check failed */
-        return TRANSLATE_FAIL;
+        return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
     }
 
     target_ulong updated_pte = pte;
@@ -1347,9 +1364,17 @@  void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
         break;
     case MMU_DATA_LOAD:
         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+        /* shadow stack misaligned accesses are access faults */
+        if (mmu_idx & MMU_IDX_SS_WRITE) {
+            cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+        }
         break;
     case MMU_DATA_STORE:
         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        /* shadow stack misaligned accesses are access faults */
+        if (mmu_idx & MMU_IDX_SS_WRITE) {
+            cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+        }
         break;
     default:
         g_assert_not_reached();
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 0ac17bc5ad..ddbdee885b 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -30,12 +30,15 @@ 
  *  - U+2STAGE          0b100
  *  - S+2STAGE          0b101
  *  - S+SUM+2STAGE      0b110
+ *  - Shadow stack+U   0b1000
+ *  - Shadow stack+S   0b1001
  */
 #define MMUIdx_U            0
 #define MMUIdx_S            1
 #define MMUIdx_S_SUM        2
 #define MMUIdx_M            3
 #define MMU_2STAGE_BIT      (1 << 2)
+#define MMU_IDX_SS_WRITE    (1 << 3)
 
 static inline int mmuidx_priv(int mmu_idx)
 {