@@ -436,6 +436,7 @@ struct stage2_map_data {
kvm_pte_t attr;
kvm_pte_t *anchor;
+ kvm_pte_t *follow;
struct kvm_s2_mmu *mmu;
struct kvm_mmu_memory_cache *memcache;
@@ -550,13 +551,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
kvm_set_invalid_pte(ptep);
/*
- * Invalidate the whole stage-2, as we may have numerous leaf
- * entries below us which would otherwise need invalidating
- * individually.
+ * If there is an existing table entry and block mapping is needed here,
+ * then set the anchor and replace it with a block entry. The sub-level
+ * mappings will later be unmapped lazily.
*/
- kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
data->anchor = ptep;
- return 0;
+ data->follow = kvm_pte_follow(*ptep);
+ return stage2_coalesce_tables_into_block(addr, level, ptep, data);
}
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -608,20 +609,18 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
{
- int ret = 0;
-
if (!data->anchor)
return 0;
- free_page((unsigned long)kvm_pte_follow(*ptep));
- put_page(virt_to_page(ptep));
-
- if (data->anchor == ptep) {
+ if (data->anchor != ptep) {
+ free_page((unsigned long)kvm_pte_follow(*ptep));
+ put_page(virt_to_page(ptep));
+ } else {
+ free_page((unsigned long)data->follow);
data->anchor = NULL;
- ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
}
- return ret;
+ return 0;
}
/*
With the new handling of coalescing tables, we can install the block entry before unmapping the old table mappings. So perform the installation in stage2_map_walk_table_pre(), and elide it from stage2_map_walk_table_post(). Signed-off-by: Yanan Wang <wangyanan55@huawei.com> --- arch/arm64/kvm/hyp/pgtable.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-)