--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -106,4 +106,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}

+#define KVM_FFA_MBOX_NR_PAGES 1
+
+static inline unsigned long hyp_ffa_proxy_pages(void)
+{
+ /* A page each for the hypervisor's RX and TX mailboxes. */
+ return 2 * KVM_FFA_MBOX_NR_PAGES;
+}
+
#endif /* __ARM64_KVM_PKVM_H__ */
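The FF-A ABI measures RX/TX buffer sizes in 4KiB pages regardless of the
kernel's PAGE_SIZE, so code passing KVM_FFA_MBOX_NR_PAGES across the FF-A
boundary has to convert between the two units. A rough sketch of that
conversion, assuming an FFA_PAGE_SIZE constant of SZ_4K (neither the
constant nor the helper below is part of this patch):

#include <linux/sizes.h>

#define FFA_PAGE_SIZE SZ_4K /* assumed: FF-A buffer granule */

/* Hypothetical helper: one hypervisor mailbox, expressed in FF-A pages. */
static inline unsigned long hyp_ffa_mbox_ffa_pages(void)
{
	return (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE;
}

With 4KiB kernel pages this evaluates to 1; with a 64KiB PAGE_SIZE each
one-page mailbox spans 16 FF-A pages.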
--- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
@@ -11,7 +11,7 @@
#define FFA_MIN_FUNC_NUM 0x60
#define FFA_MAX_FUNC_NUM 0x7F

-int hyp_ffa_init(void);
+int hyp_ffa_init(void *pages);
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);

#endif /* __KVM_HYP_FFA_H */
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -28,8 +28,11 @@

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
+#include <asm/kvm_pkvm.h>
+
#include <nvhe/ffa.h>
#include <nvhe/trap_handler.h>
+#include <nvhe/spinlock.h>

/*
* "ID value 0 must be returned at the Non-secure physical FF-A instance"
@@ -37,6 +40,19 @@
*/
#define HOST_FFA_ID 0

+struct kvm_ffa_buffers {
+ hyp_spinlock_t lock;
+ void *tx;
+ void *rx;
+};
+
+/*
+ * Note that we don't currently lock these buffers explicitly, instead
+ * relying on the locking of the host FFA buffers as we only have one
+ * client.
+ */
+static struct kvm_ffa_buffers hyp_buffers;
+
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
*res = (struct arm_smccc_res) {
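As the comment above notes, nothing takes hyp_buffers.lock yet. Handlers
added later in the series, which stage memory-sharing descriptors through
these mailboxes, will need to serialise on it, roughly along these lines
(hypothetical helper, for illustration only):

/* Hypothetical: serialise hypervisor mailbox access across host vCPUs. */
static void ffa_fill_hyp_tx(const void *desc, size_t len)
{
	hyp_spin_lock(&hyp_buffers.lock);
	/* len must not exceed KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE. */
	memcpy(hyp_buffers.tx, desc, len);
	hyp_spin_unlock(&hyp_buffers.lock);
}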
@@ -111,7 +127,7 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
return true;
}

-int hyp_ffa_init(void)
+int hyp_ffa_init(void *pages)
{
struct arm_smccc_res res;

@@ -132,5 +148,11 @@ int hyp_ffa_init(void)
if (res.a2 != HOST_FFA_ID)
return -EINVAL;

+ hyp_buffers = (struct kvm_ffa_buffers) {
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+ .tx = pages,
+ .rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
+ };
+
return 0;
}
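At this point hyp_ffa_init() only records where the mailboxes live;
nothing has advertised them to the secure side. Later in the series a
helper along these lines registers the pair with EL3 when the host issues
FFA_RXTX_MAP, sketched here with simplified error handling:

/* Sketch: advertise the hypervisor mailboxes to the SPMD at EL3. */
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, &res);

	return res.a0 == FFA_SUCCESS ? 0 : -EINVAL;
}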
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -29,6 +29,7 @@ static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
+static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

@@ -58,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!host_s2_pgt_base)
return -ENOMEM;

+ nr_pages = hyp_ffa_proxy_pages();
+ ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
+ if (!ffa_proxy_pages)
+ return -ENOMEM;
+
return 0;
}

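The carve-up above relies on hyp_early_alloc_contig() behaving as a simple
bump allocator over the region reserved by kvm_hyp_reserve(): allocations
are page-granule, physically contiguous, zeroed and never freed. An
illustrative sketch of those semantics (not the in-tree allocator):

/* Illustrative bump allocator over an early carve-out [cur, end). */
static unsigned long cur, end;

static void *early_alloc_contig_sketch(unsigned int nr_pages)
{
	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;
	void *ret = (void *)cur;

	if (!nr_pages || size > end - cur)
		return NULL; /* pool exhausted: caller returns -ENOMEM */

	cur += size;
	memset(ret, 0, size); /* early allocations come back zeroed */
	return ret;
}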
@@ -315,7 +321,7 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;

- ret = hyp_ffa_init();
+ ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;

--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -74,6 +74,7 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += host_s2_pgtable_pages();
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+ hyp_mem_pages += hyp_ffa_proxy_pages();

/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once