Message ID | 20230828165958.1714079-2-puranjay12@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | bpf, riscv: use BPF prog pack allocator in BPF JIT |
On 2023/8/29 0:59, Puranjay Mohan wrote:
> The patch_insn_write() function currently doesn't work for multiple pages
> of instructions, therefore patch_text_nosync() will fail with a page fault
> if called with lengths spanning multiple pages.
>
> This commit extends the patch_insn_write() function to support multiple
> pages by copying at max 2 pages at a time in a loop. This implementation
> is similar to text_poke_copy() function of x86.
>
> Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
> Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
> ---
>  arch/riscv/kernel/patch.c | 37 ++++++++++++++++++++++++++++++++-----
>  1 file changed, 32 insertions(+), 5 deletions(-)
>
> diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
> index 575e71d6c8ae..2c97e246f4dc 100644
> --- a/arch/riscv/kernel/patch.c
> +++ b/arch/riscv/kernel/patch.c
> @@ -53,12 +53,18 @@ static void patch_unmap(int fixmap)
>  }
>  NOKPROBE_SYMBOL(patch_unmap);
>
> -static int patch_insn_write(void *addr, const void *insn, size_t len)
> +static int __patch_insn_write(void *addr, const void *insn, size_t len)
>  {
>         void *waddr = addr;
>         bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
>         int ret;
>
> +       /*
> +        * Only two pages can be mapped at a time for writing.
> +        */
> +       if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
> +               return -EINVAL;
> +
>         /*
>          * Before reaching here, it was expected to lock the text_mutex
>          * already, so we don't need to give another lock here and could
> @@ -74,7 +80,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>         lockdep_assert_held(&text_mutex);
>
>         if (across_pages)
> -               patch_map(addr + len, FIX_TEXT_POKE1);
> +               patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
>
>         waddr = patch_map(addr, FIX_TEXT_POKE0);
>
> @@ -87,15 +93,36 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
>
>         return ret;
>  }
> -NOKPROBE_SYMBOL(patch_insn_write);
> +NOKPROBE_SYMBOL(__patch_insn_write);
>  #else
> -static int patch_insn_write(void *addr, const void *insn, size_t len)
> +static int __patch_insn_write(void *addr, const void *insn, size_t len)
>  {
>         return copy_to_kernel_nofault(addr, insn, len);
>  }
> -NOKPROBE_SYMBOL(patch_insn_write);
> +NOKPROBE_SYMBOL(__patch_insn_write);
>  #endif /* CONFIG_MMU */
>
> +static int patch_insn_write(void *addr, const void *insn, size_t len)
> +{
> +       size_t patched = 0;
> +       size_t size;
> +       int ret = 0;
> +
> +       /*
> +        * Copy the instructions to the destination address, two pages at a time
> +        * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
> +        */
> +       while (patched < len && !ret) {
> +               size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
> +               ret = __patch_insn_write(addr + patched, insn + patched, size);
> +
> +               patched += size;
> +       }
> +
> +       return ret;
> +}
> +NOKPROBE_SYMBOL(patch_insn_write);
> +
>  int patch_text_nosync(void *addr, const void *insns, size_t len)
>  {
>         u32 *tp = addr;

Looks good to me,

Reviewed-by: Pu Lehui <pulehui@huawei.com>
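For readers following the series, the chunking arithmetic that the new
patch_insn_write() wrapper relies on can be tried outside the kernel. The
sketch below is only an illustration under simplified assumptions: a fixed
4 KiB PAGE_SIZE, a made-up start address, and a local offset_in_page()
helper standing in for the kernel macro. It prints how a copy crossing page
boundaries would be split into chunks of at most two pages, which is the
limit each __patch_insn_write() call can map via the two fixmap slots.

/*
 * Standalone sketch (not kernel code): mirrors the size computation in the
 * patch's while loop, i.e. the first chunk is shortened by the offset of
 * the start address within its page, later chunks are up to two pages.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL                 /* assumption: 4 KiB pages */

static size_t offset_in_page(uintptr_t addr)
{
        return addr & (PAGE_SIZE - 1);   /* stand-in for the kernel macro */
}

int main(void)
{
        /* hypothetical text address starting 100 bytes before a page boundary */
        uintptr_t addr = 0x10000000UL - 100;
        size_t len = 3 * PAGE_SIZE;      /* copy touches four pages in total */
        size_t patched = 0;

        while (patched < len) {
                /* same expression as min_t(size_t, ...) in the patch */
                size_t size = 2 * PAGE_SIZE - offset_in_page(addr + patched);

                if (size > len - patched)
                        size = len - patched;

                /* in the kernel this chunk would go to __patch_insn_write() */
                printf("chunk at +%zu, %zu bytes\n", patched, size);
                patched += size;
        }
        return 0;
}

With these numbers the copy is split into two calls (4196 bytes, then 8092
bytes), each of which fits within the two pages that FIX_TEXT_POKE0 and
FIX_TEXT_POKE1 can map at once.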