@@ -506,7 +506,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
unsigned long end, unsigned long page_shift,
struct kvm *kvm, unsigned long gpa)
{
- unsigned long src_pfn, dst_pfn = 0;
+ unsigned long src_pfn = 0, dst_pfn = 0;
struct migrate_vma mig;
struct page *dpage, *spage;
struct kvmppc_uvmem_page_pvt *pvt;
@@ -732,7 +732,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
unsigned long page_shift,
bool pagein)
{
- unsigned long src_pfn, dst_pfn = 0;
+ unsigned long src_pfn = 0, dst_pfn = 0;
struct migrate_vma mig;
struct page *spage;
unsigned long pfn;
@@ -696,6 +696,8 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (next > vma->vm_end)
next = vma->vm_end;
+ memset(src_pfns, 0, sizeof(src_pfns));
+ memset(dst_pfns, 0, sizeof(dst_pfns));
args.vma = vma;
args.src = src_pfns;
args.dst = dst_pfns;
@@ -1025,8 +1027,8 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
struct migrate_vma args;
- unsigned long src_pfns;
- unsigned long dst_pfns;
+ unsigned long src_pfns = 0;
+ unsigned long dst_pfns = 0;
struct page *rpage;
struct dmirror *dmirror;
vm_fault_t ret;
@@ -2874,7 +2874,6 @@ int migrate_vma_setup(struct migrate_vma *args)
if (!args->src || !args->dst)
return -EINVAL;
- memset(args->src, 0, sizeof(*args->src) * nr_pages);
args->cpages = 0;
args->npages = 0;
Currently migrate_vma_setup() zeros both src and dst pfn arrays. This
means it is not possible to pass per-pfn flags to migrate_vma_setup().
A future patch introduces per-pfn flags for migrate_vma_setup(), so
ensure existing callers will not be affected by having the caller zero
both src and dst pfn arrays.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 arch/powerpc/kvm/book3s_hv_uvmem.c | 4 ++--
 lib/test_hmm.c                     | 6 ++++--
 mm/migrate.c                       | 1 -
 3 files changed, 6 insertions(+), 5 deletions(-)