@@ -118,7 +118,8 @@ static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
 }
 
 static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
-				    phys_addr_t end)
+				    phys_addr_t end,
+				    enum kvm_pgtable_walk_flags flags)
 {
 	struct kvm_mmu_memory_cache *cache;
 	struct kvm_pgtable *pgt;
@@ -153,7 +154,8 @@ static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
 			return -EINVAL;
 
 		next = __stage2_range_addr_end(addr, end, chunk_size);
-		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache, 0);
+		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache,
+					       flags);
 		if (ret)
 			break;
 	} while (addr = next, addr != end);
@@ -1112,7 +1114,7 @@ static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot)
 	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
 	write_lock(&kvm->mmu_lock);
-	kvm_mmu_split_huge_pages(kvm, start, end);
+	kvm_mmu_split_huge_pages(kvm, start, end, 0);
 	write_unlock(&kvm->mmu_lock);
 }
 
@@ -1149,7 +1151,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	 * again.
 	 */
 	if (kvm_dirty_log_manual_protect_and_init_set(kvm))
-		kvm_mmu_split_huge_pages(kvm, start, end);
+		kvm_mmu_split_huge_pages(kvm, start, end, 0);
 	write_unlock(&kvm->mmu_lock);
 }
 
Pass enum kvm_pgtable_walk_flags to kvm_mmu_split_huge_pages(). Use 0
as the flag value to make it a no-op for the existing callers. In a
future commit, kvm_mmu_split_huge_pages() will be used under both the
MMU read lock and the MMU write lock; the flag lets callers pass their
intent to use the shared or the non-shared page walker to split the
huge pages.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/arm64/kvm/mmu.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
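
For context, a minimal sketch (not part of this patch) of how the new
parameter is expected to be used once the read-lock path lands. It
assumes the existing KVM_PGTABLE_WALK_SHARED flag from
arch/arm64/include/asm/kvm_pgtable.h; the helper name and its call
site below are hypothetical illustrations, not code from this series:

	/*
	 * Illustration only: a future caller holding the MMU read
	 * lock would request a shared (RCU-protected) page-table
	 * walk, so the stage-2 split can run concurrently with other
	 * readers of the page tables.
	 */
	static void example_split_under_read_lock(struct kvm *kvm,
						  phys_addr_t start,
						  phys_addr_t end)
	{
		read_lock(&kvm->mmu_lock);
		kvm_mmu_split_huge_pages(kvm, start, end,
					 KVM_PGTABLE_WALK_SHARED);
		read_unlock(&kvm->mmu_lock);
	}

The two write-lock call sites touched above keep passing 0, i.e. the
non-shared walker under the exclusive mmu_lock, so this patch does not
change behavior.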