@@ -87,13 +87,12 @@ The following keys are defined:
emulated via software, either in or below the kernel. These accesses are
always extremely slow.
- * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are supported
- in hardware, but are slower than the cooresponding aligned accesses
- sequences.
+ * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower
+ than equivalent byte accesses. Misaligned accesses may be supported
+ directly in hardware, or trapped and emulated by software.
- * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are supported
- in hardware and are faster than the cooresponding aligned accesses
- sequences.
+ * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster
+ than equivalent byte accesses.
* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
not supported at all and will generate a misaligned address fault.
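
For completeness, a userspace consumer can read the keys above through the
hwprobe syscall. The sketch below is illustrative only: it assumes the uapi
header from this series (asm/hwprobe.h) is installed and that the libc
exposes __NR_riscv_hwprobe; error handling is minimal. Per the hwprobe
interface, a cpu_count of 0 with a NULL cpuset queries all online CPUs, so
the reported value is the consensus across them.

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>

  int main(void)
  {
  	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

  	/* cpu_count == 0 with a NULL cpuset queries all online CPUs. */
  	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
  		return 1;

  	switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
  	case RISCV_HWPROBE_MISALIGNED_FAST:
  		puts("misaligned accesses are fast");
  		break;
  	case RISCV_HWPROBE_MISALIGNED_SLOW:
  		puts("misaligned accesses are slow");
  		break;
  	default:
  		puts("misaligned accesses: emulated/unsupported/unknown");
  	}
  	return 0;
  }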
@@ -30,4 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
+void check_unaligned_access(int cpu);
+
#endif
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
obj-y += head.o
obj-y += soc.o
obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y += copy-unaligned.o
obj-y += cpu.o
obj-y += cpufeature.o
obj-y += entry.o
new file mode 100644
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+ .text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+ andi a4, a2, ~((8*SZREG)-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ REG_L a4, 0(a1)
+ REG_L a5, SZREG(a1)
+ REG_L a6, 2*SZREG(a1)
+ REG_L a7, 3*SZREG(a1)
+ REG_L t0, 4*SZREG(a1)
+ REG_L t1, 5*SZREG(a1)
+ REG_L t2, 6*SZREG(a1)
+ REG_L t3, 7*SZREG(a1)
+ REG_S a4, 0(a0)
+ REG_S a5, SZREG(a0)
+ REG_S a6, 2*SZREG(a0)
+ REG_S a7, 3*SZREG(a0)
+ REG_S t0, 4*SZREG(a0)
+ REG_S t1, 5*SZREG(a0)
+ REG_S t2, 6*SZREG(a0)
+ REG_S t3, 7*SZREG(a0)
+ addi a0, a0, 8*SZREG
+ addi a1, a1, 8*SZREG
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+ andi a4, a2, ~(8-1)
+ beqz a4, 2f
+ add a3, a1, a4
+1:
+ lb a4, 0(a1)
+ lb a5, 1(a1)
+ lb a6, 2(a1)
+ lb a7, 3(a1)
+ lb t0, 4(a1)
+ lb t1, 5(a1)
+ lb t2, 6(a1)
+ lb t3, 7(a1)
+ sb a4, 0(a0)
+ sb a5, 1(a0)
+ sb a6, 2(a0)
+ sb a7, 3(a0)
+ sb t0, 4(a0)
+ sb t1, 5(a0)
+ sb t2, 6(a0)
+ sb t3, 7(a0)
+ addi a0, a0, 8
+ addi a1, a1, 8
+ bltu a1, a3, 1b
+
+2:
+ ret
+END(__riscv_copy_bytes_unaligned)
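
As an aside for readers following the assembly: the word-copy routine above
is, roughly, the C loop sketched below (the name is hypothetical and the
code is illustrative only). The real routine has to live in assembly because
a C compiler may assume pointers are aligned, and dereferencing a misaligned
unsigned long pointer is undefined behavior; only hand-written loads and
stores guarantee true word-sized misaligned accesses.

  #include <stddef.h>

  /* Illustrative C rendering of __riscv_copy_words_unaligned(). */
  static void copy_words_unaligned(void *dst, const void *src, size_t size)
  {
  	unsigned long *d = dst;
  	const unsigned long *s = src;
  	/* Truncate to whole 8-word groups, as the assembly does. */
  	size_t n = size / (8 * sizeof(unsigned long));

  	while (n--) {
  		d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
  		d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
  		d += 8;
  		s += 8;
  	}
  }

__riscv_copy_bytes_unaligned() has the same shape but works on bytes, which
is why it serves as the baseline: byte accesses can never be misaligned.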
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */
@@ -19,12 +19,19 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
+#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/vector.h>
+#include "copy-unaligned.h"
+
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
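+
+/*
+ * Parameters for probing misaligned access speed: each copy routine is
+ * timed for 2^MISALIGNED_ACCESS_JIFFIES_LG2 jiffies over a 16 KiB buffer.
+ * The buffer is split in half (dst in the lower half, src in the upper),
+ * and the copy size leaves 0x80 bytes of slack so the deliberately
+ * misaligned pointers (dst off by 1, src off by 3) stay inside the
+ * allocation.
+ */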
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
@@ -555,6 +562,103 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
+void check_unaligned_access(int cpu)
+{
+ u64 start_cycles, end_cycles;
+ u64 word_cycles;
+ u64 byte_cycles;
+ int ratio;
+ unsigned long start_jiffies, now;
+ struct page *page;
+ void *dst;
+ void *src;
+ long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
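+ /*
+ * GFP_NOWAIT: on secondary CPUs this runs from smp_callin(), where
+ * sleeping is not allowed.
+ */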
+ page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+ if (!page) {
+ pr_warn("Can't alloc pages to measure memcpy performance\n");
+ return;
+ }
+
+ /* Make an unaligned destination buffer. */
+ dst = (void *)((unsigned long)page_address(page) | 0x1);
+ /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+ src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+ src += 2;
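+ /* Track the best (lowest) cycle count seen for each copy routine. */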
+ word_cycles = -1ULL;
+ /* Do a warmup. */
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ preempt_disable();
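+ /*
+ * Sync up to the start of the next jiffy so the timed window below
+ * covers whole ticks.
+ */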
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ /*
+ * For a fixed amount of time, repeatedly try the function, and take
+ * the best time in cycles as the measurement.
+ */
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ /* Ensure the CSR read can't reorder with respect to the copy. */
+ mb();
+ __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ /* Ensure the copy ends before the end time is snapped. */
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < word_cycles)
+ word_cycles = end_cycles - start_cycles;
+ }
+
+ byte_cycles = -1ULL;
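+ /* Do a warmup, as above. */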
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ start_jiffies = jiffies;
+ while ((now = jiffies) == start_jiffies)
+ cpu_relax();
+
+ while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+ start_cycles = get_cycles64();
+ mb();
+ __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+ mb();
+ end_cycles = get_cycles64();
+ if ((end_cycles - start_cycles) < byte_cycles)
+ byte_cycles = end_cycles - start_cycles;
+ }
+
+ preempt_enable();
+
+ /* Don't divide by zero. */
+ if (!word_cycles || !byte_cycles) {
+ pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+ cpu);
+
+ goto out;
+ }
+
+ if (word_cycles < byte_cycles)
+ speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
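+ /* Scale by 100 so the ratio prints with two decimal places. */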
+ ratio = (byte_cycles * 100) / word_cycles;
+ pr_info("cpu%d: Ratio of byte access time to unaligned word access time is %d.%02d, unaligned accesses are %s\n",
+ cpu,
+ ratio / 100,
+ ratio % 100,
+ (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+ per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+ __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
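+/*
+ * The boot CPU can't be probed from smp_callin(), so use an initcall,
+ * which runs once the page allocator and timekeeping are available.
+ * Secondary CPUs run check_unaligned_access() from smp_callin() as they
+ * come online (see the smpboot.c hunk below).
+ */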
+static int check_unaligned_access_boot_cpu(void)
+{
+ check_unaligned_access(0);
+ return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch
@@ -26,6 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
@@ -245,6 +246,7 @@ asmlinkage __visible void smp_callin(void)
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
+ check_unaligned_access(curr_cpuid);
probe_vendor_features(curr_cpuid);
if (has_vector()) {