@@ -393,17 +393,12 @@ struct pkvm_mem_transition {
enum pkvm_component_id id;
/* Address in the initiator's address space */
u64 addr;
-
- union {
- struct {
- /* Address in the completer's address space */
- u64 completer_addr;
- } host;
- };
} initiator;
struct {
enum pkvm_component_id id;
+ /* Address in the completer's address space */
+ u64 addr;
} completer;
};
@@ -471,43 +466,35 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return host_stage2_idmap_locked(addr, size, prot);
}
-static int host_request_owned_transition(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_request_owned_transition(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
-static int host_request_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_request_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
-static int host_initiate_share(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_initiate_share(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
-static int host_initiate_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_initiate_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
@@ -537,7 +524,7 @@ static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
tx->initiator.id != PKVM_ID_HOST);
}
-static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
+static int hyp_ack_share(const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
@@ -548,34 +535,35 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
- return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+ return __hyp_check_page_state_range(tx->completer.addr, size, PKVM_NOPAGE);
}
-static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+static int hyp_ack_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
- return __hyp_check_page_state_range(addr, size,
+ return __hyp_check_page_state_range(tx->completer.addr, size,
PKVM_PAGE_SHARED_BORROWED);
}
-static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
+static int hyp_complete_share(const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
- void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ void *start = (void *)tx->completer.addr;
+ void *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot;
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
return pkvm_create_mappings_locked(start, end, prot);
}
-static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+static int hyp_complete_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
- int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
+ int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->completer.addr, size);
return (ret != size) ? -EFAULT : 0;
}
@@ -583,12 +571,11 @@ static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int check_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_request_owned_transition(&completer_addr, tx);
+ ret = host_request_owned_transition(tx);
break;
default:
ret = -EINVAL;
@@ -599,7 +586,7 @@ static int check_share(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
+ ret = hyp_ack_share(tx, share->completer_prot);
break;
default:
ret = -EINVAL;
@@ -611,12 +598,11 @@ static int check_share(struct pkvm_mem_share *share)
static int __do_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_initiate_share(&completer_addr, tx);
+ ret = host_initiate_share(tx);
break;
default:
ret = -EINVAL;
@@ -627,7 +613,7 @@ static int __do_share(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
+ ret = hyp_complete_share(tx, share->completer_prot);
break;
default:
ret = -EINVAL;
@@ -659,12 +645,11 @@ static int do_share(struct pkvm_mem_share *share)
static int check_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_request_unshare(&completer_addr, tx);
+ ret = host_request_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -675,7 +660,7 @@ static int check_unshare(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_ack_unshare(completer_addr, tx);
+ ret = hyp_ack_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -687,12 +672,11 @@ static int check_unshare(struct pkvm_mem_share *share)
static int __do_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_initiate_unshare(&completer_addr, tx);
+ ret = host_initiate_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -703,7 +687,7 @@ static int __do_unshare(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_complete_unshare(completer_addr, tx);
+ ret = hyp_complete_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -743,12 +727,10 @@ int __pkvm_host_share_hyp(u64 pfn)
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
},
.completer = {
.id = PKVM_ID_HYP,
+ .addr = hyp_addr,
},
},
.completer_prot = PAGE_HYP,
@@ -776,12 +758,10 @@ int __pkvm_host_unshare_hyp(u64 pfn)
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
},
.completer = {
.id = PKVM_ID_HYP,
+ .addr = hyp_addr,
},
},
.completer_prot = PAGE_HYP,
The layout of struct pkvm_mem_transition is a bit weird; the destination
address for the transition is actually stashed in the initiator's address
context. Weirder still, that address is wrapped in a union and returned
from helpers by way of an out pointer.

Rip out the whole mess and move the destination address into the
completer's context sub-struct.

No functional change intended.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 70 ++++++++++-----------------
 1 file changed, 25 insertions(+), 45 deletions(-)
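For quick reference, here is a minimal sketch of how the transition structure
and one completer-side helper read once the hunks above are applied. This is
reconstructed from the diff itself, not from the full file: the nr_pages
member is inferred from the tx->nr_pages uses, and any other members are
elided.

	struct pkvm_mem_transition {
		u64			nr_pages;	/* inferred from tx->nr_pages uses above */

		struct {
			enum pkvm_component_id	id;
			/* Address in the initiator's address space */
			u64			addr;
		} initiator;

		struct {
			enum pkvm_component_id	id;
			/* Address in the completer's address space */
			u64			addr;
		} completer;
	};

	/*
	 * Completer-side helpers now read the destination address straight
	 * off the transition instead of taking it as a parameter threaded
	 * through an out pointer (body as in the hunk above).
	 */
	static int hyp_ack_unshare(const struct pkvm_mem_transition *tx)
	{
		u64 size = tx->nr_pages * PAGE_SIZE;

		if (__hyp_ack_skip_pgtable_check(tx))
			return 0;

		return __hyp_check_page_state_range(tx->completer.addr, size,
						    PKVM_PAGE_SHARED_BORROWED);
	}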