@@ -1198,8 +1198,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		sparse->header.version = 1;
 		sparse->nr_areas = nr_areas;
 		cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
-		sparse->areas[0].offset =
-				PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+		sparse->areas[0].offset = 0;
 		sparse->areas[0].size = vgpu_aperture_sz(vgpu);
 		break;
 
@@ -48,8 +48,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
-	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
-		vgpu_aperture_gmadr_base(vgpu);
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = 0;
 	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
 		vgpu_aperture_sz(vgpu);
 	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
@@ -524,6 +523,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	u64 maddr = vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base));
+	u64 unmaddr = vgpu_vreg_t(vgpu,
+			vgtif_reg(avail_rs.nonmappable_gmadr.base));
 	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
@@ -556,6 +558,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 
 		intel_vgpu_reset_mmio(vgpu, dmlr);
 		populate_pvinfo_page(vgpu);
+		vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+			maddr;
+		vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+			unmaddr;
 		intel_vgpu_reset_display(vgpu);
 
 		if (dmlr) {